Nov 26 15:26:18 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 26 15:26:18 crc restorecon[4750]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized
by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc 
restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 15:26:18 crc 
restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc 
restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc 
restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 
crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 
15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 
15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 15:26:18 crc 
restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc 
restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:18 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 15:26:19 crc restorecon[4750]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 26 15:26:19 crc kubenswrapper[5010]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 15:26:19 crc kubenswrapper[5010]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 26 15:26:19 crc kubenswrapper[5010]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 15:26:19 crc kubenswrapper[5010]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 26 15:26:19 crc kubenswrapper[5010]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 26 15:26:19 crc kubenswrapper[5010]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.631061 5010 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637186 5010 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637223 5010 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637234 5010 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637249 5010 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637266 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637278 5010 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637291 5010 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637303 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637314 5010 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637325 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637335 5010 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637345 5010 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637354 5010 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637364 5010 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637377 5010 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637391 5010 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637431 5010 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637441 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637451 5010 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637461 5010 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637471 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637481 5010 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637488 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637497 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637504 5010 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637512 5010 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637520 5010 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637527 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637535 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637546 5010 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637555 5010 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637563 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637571 5010 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637580 5010 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637589 5010 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637600 5010 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637610 5010 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637619 5010 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637628 5010 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637637 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637646 5010 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637655 5010 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637665 5010 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637676 5010 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637686 5010 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637696 5010 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637739 5010 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637749 5010 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637757 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637767 5010 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637775 5010 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637786 5010 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637794 5010 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637802 5010 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637810 5010 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637819 5010 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637827 5010 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637836 5010 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637844 5010 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637852 5010 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637860 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637868 5010 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 26 15:26:19 crc 
kubenswrapper[5010]: W1126 15:26:19.637875 5010 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637886 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637894 5010 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637901 5010 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637909 5010 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637919 5010 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637926 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637935 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.637942 5010 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639235 5010 flags.go:64] FLAG: --address="0.0.0.0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639261 5010 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639276 5010 flags.go:64] FLAG: --anonymous-auth="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639288 5010 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639303 5010 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639315 5010 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639332 5010 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639347 5010 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639358 5010 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639368 5010 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639381 5010 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639394 5010 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639404 5010 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639416 5010 flags.go:64] FLAG: --cgroup-root="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639428 5010 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639441 5010 flags.go:64] FLAG: --client-ca-file="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639452 5010 flags.go:64] FLAG: --cloud-config="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639465 5010 flags.go:64] FLAG: --cloud-provider="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639477 5010 flags.go:64] FLAG: --cluster-dns="[]" Nov 26 15:26:19 crc 
kubenswrapper[5010]: I1126 15:26:19.639497 5010 flags.go:64] FLAG: --cluster-domain="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639510 5010 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639523 5010 flags.go:64] FLAG: --config-dir="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639537 5010 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639550 5010 flags.go:64] FLAG: --container-log-max-files="5" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639565 5010 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639576 5010 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639588 5010 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639600 5010 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639612 5010 flags.go:64] FLAG: --contention-profiling="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639624 5010 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639634 5010 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639644 5010 flags.go:64] FLAG: --cpu-manager-policy="none" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639656 5010 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639668 5010 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639678 5010 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639687 5010 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639696 5010 flags.go:64] FLAG: --enable-load-reader="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639736 5010 flags.go:64] FLAG: --enable-server="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639747 5010 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639762 5010 flags.go:64] FLAG: --event-burst="100" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639775 5010 flags.go:64] FLAG: --event-qps="50" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639787 5010 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639799 5010 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639811 5010 flags.go:64] FLAG: --eviction-hard="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639825 5010 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639836 5010 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639845 5010 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639855 5010 flags.go:64] FLAG: --eviction-soft="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639865 5010 flags.go:64] FLAG: 
--eviction-soft-grace-period="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639874 5010 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639883 5010 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639892 5010 flags.go:64] FLAG: --experimental-mounter-path="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639901 5010 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639911 5010 flags.go:64] FLAG: --fail-swap-on="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639921 5010 flags.go:64] FLAG: --feature-gates="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639932 5010 flags.go:64] FLAG: --file-check-frequency="20s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639941 5010 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639951 5010 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639960 5010 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639970 5010 flags.go:64] FLAG: --healthz-port="10248" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639979 5010 flags.go:64] FLAG: --help="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639988 5010 flags.go:64] FLAG: --hostname-override="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.639996 5010 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640005 5010 flags.go:64] FLAG: --http-check-frequency="20s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640015 5010 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640023 5010 flags.go:64] FLAG: --image-credential-provider-config="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640032 5010 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640041 5010 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640052 5010 flags.go:64] FLAG: --image-service-endpoint="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640063 5010 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640074 5010 flags.go:64] FLAG: --kube-api-burst="100" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640085 5010 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640095 5010 flags.go:64] FLAG: --kube-api-qps="50" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640104 5010 flags.go:64] FLAG: --kube-reserved="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640113 5010 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640122 5010 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640131 5010 flags.go:64] FLAG: --kubelet-cgroups="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640140 5010 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640149 5010 flags.go:64] FLAG: --lock-file="" 
Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640159 5010 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640170 5010 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640181 5010 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640198 5010 flags.go:64] FLAG: --log-json-split-stream="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640209 5010 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640219 5010 flags.go:64] FLAG: --log-text-split-stream="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640232 5010 flags.go:64] FLAG: --logging-format="text" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640246 5010 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640258 5010 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640270 5010 flags.go:64] FLAG: --manifest-url="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640281 5010 flags.go:64] FLAG: --manifest-url-header="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640297 5010 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640308 5010 flags.go:64] FLAG: --max-open-files="1000000" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640319 5010 flags.go:64] FLAG: --max-pods="110" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640329 5010 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640338 5010 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640349 5010 flags.go:64] FLAG: --memory-manager-policy="None" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640360 5010 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640372 5010 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640382 5010 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640391 5010 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640413 5010 flags.go:64] FLAG: --node-status-max-images="50" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640422 5010 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640431 5010 flags.go:64] FLAG: --oom-score-adj="-999" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640440 5010 flags.go:64] FLAG: --pod-cidr="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640453 5010 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640471 5010 flags.go:64] FLAG: --pod-manifest-path="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640482 5010 flags.go:64] FLAG: --pod-max-pids="-1" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640495 5010 
flags.go:64] FLAG: --pods-per-core="0" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640507 5010 flags.go:64] FLAG: --port="10250" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640518 5010 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640530 5010 flags.go:64] FLAG: --provider-id="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640540 5010 flags.go:64] FLAG: --qos-reserved="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640550 5010 flags.go:64] FLAG: --read-only-port="10255" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640560 5010 flags.go:64] FLAG: --register-node="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640569 5010 flags.go:64] FLAG: --register-schedulable="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640578 5010 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640597 5010 flags.go:64] FLAG: --registry-burst="10" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640609 5010 flags.go:64] FLAG: --registry-qps="5" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640621 5010 flags.go:64] FLAG: --reserved-cpus="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640635 5010 flags.go:64] FLAG: --reserved-memory="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640649 5010 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640659 5010 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640669 5010 flags.go:64] FLAG: --rotate-certificates="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640678 5010 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640687 5010 flags.go:64] FLAG: --runonce="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640696 5010 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640735 5010 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640745 5010 flags.go:64] FLAG: --seccomp-default="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640754 5010 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640763 5010 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640772 5010 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640782 5010 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640794 5010 flags.go:64] FLAG: --storage-driver-password="root" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640805 5010 flags.go:64] FLAG: --storage-driver-secure="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640816 5010 flags.go:64] FLAG: --storage-driver-table="stats" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640825 5010 flags.go:64] FLAG: --storage-driver-user="root" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640834 5010 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640843 5010 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 
26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640852 5010 flags.go:64] FLAG: --system-cgroups="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640861 5010 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640877 5010 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640887 5010 flags.go:64] FLAG: --tls-cert-file="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640896 5010 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640909 5010 flags.go:64] FLAG: --tls-min-version="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640920 5010 flags.go:64] FLAG: --tls-private-key-file="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640930 5010 flags.go:64] FLAG: --topology-manager-policy="none" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640941 5010 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640951 5010 flags.go:64] FLAG: --topology-manager-scope="container" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640960 5010 flags.go:64] FLAG: --v="2" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640973 5010 flags.go:64] FLAG: --version="false" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640984 5010 flags.go:64] FLAG: --vmodule="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.640995 5010 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.641012 5010 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641262 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641276 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641286 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641298 5010 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641309 5010 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641319 5010 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641329 5010 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641338 5010 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641345 5010 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641354 5010 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641362 5010 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641372 5010 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641381 5010 feature_gate.go:330] unrecognized feature gate: 
MixedCPUsAllocation Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641391 5010 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641401 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641413 5010 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641423 5010 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641432 5010 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641443 5010 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641453 5010 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641464 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641474 5010 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641483 5010 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641493 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641501 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641510 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641518 5010 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641526 5010 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641534 5010 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641541 5010 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641551 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641561 5010 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641569 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641576 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641584 5010 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641592 5010 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641600 5010 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641611 5010 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641620 5010 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641629 5010 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641639 5010 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641650 5010 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641659 5010 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641669 5010 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641679 5010 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641691 5010 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641701 5010 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641739 5010 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641748 5010 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641758 5010 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641768 5010 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641777 5010 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641785 5010 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641794 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641801 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641809 5010 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641817 5010 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641825 5010 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641832 5010 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641841 5010 feature_gate.go:330] unrecognized feature gate: Example Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641849 5010 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641857 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641866 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure 
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641874 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641883 5010 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641892 5010 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641901 5010 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641909 5010 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641917 5010 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641924 5010 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.641933 5010 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.642851 5010 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.657655 5010 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.657742 5010 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658352 5010 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658375 5010 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658380 5010 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658385 5010 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658389 5010 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658392 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658397 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658402 5010 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658406 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658413 5010 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658423 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658430 5010 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658435 5010 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658440 5010 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658443 5010 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658448 5010 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658452 5010 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658455 5010 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658459 5010 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658464 5010 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658469 5010 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658473 5010 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658477 5010 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658481 5010 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658485 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658489 5010 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658493 5010 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658496 5010 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658500 5010 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658504 5010 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658509 5010 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658514 5010 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658519 5010 feature_gate.go:330] unrecognized feature gate: Example Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658522 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658529 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658532 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658536 5010 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658540 5010 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658544 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658547 5010 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658551 5010 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658554 5010 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658558 5010 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658562 5010 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658569 5010 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658573 5010 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658578 5010 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658582 5010 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658586 5010 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658590 5010 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658594 5010 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658598 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658602 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658606 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658609 5010 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658613 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658617 5010 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658621 5010 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658626 5010 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658629 5010 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658633 5010 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658637 5010 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658642 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658646 5010 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658650 5010 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658654 5010 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658658 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658662 5010 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658666 5010 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658670 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658674 5010 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.658682 5010 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false 
KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658846 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658855 5010 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658860 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658864 5010 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658869 5010 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658873 5010 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658877 5010 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658881 5010 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658885 5010 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658891 5010 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658894 5010 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658898 5010 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658903 5010 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658907 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658910 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658914 5010 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658918 5010 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658922 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658926 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658930 5010 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658934 5010 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658938 5010 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658942 5010 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 15:26:19 crc kubenswrapper[5010]: 
W1126 15:26:19.658946 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658950 5010 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658954 5010 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658958 5010 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658962 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658966 5010 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658969 5010 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658974 5010 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658979 5010 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658985 5010 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658990 5010 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658994 5010 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.658999 5010 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659006 5010 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659010 5010 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659015 5010 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659020 5010 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659025 5010 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659030 5010 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659034 5010 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659038 5010 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659042 5010 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659047 5010 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659051 5010 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659055 5010 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659059 5010 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659063 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659067 5010 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659071 5010 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659075 5010 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659081 5010 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659085 5010 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659090 5010 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659095 5010 feature_gate.go:330] unrecognized feature gate: Example Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659100 5010 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659105 5010 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659109 5010 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659114 5010 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659121 5010 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659125 5010 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659129 5010 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659134 5010 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659138 5010 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659143 5010 feature_gate.go:353] Setting GA feature gate 
ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659148 5010 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659153 5010 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659158 5010 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.659162 5010 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.659170 5010 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.659420 5010 server.go:940] "Client rotation is on, will bootstrap in background" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.663576 5010 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.664233 5010 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.665991 5010 server.go:997] "Starting client certificate rotation" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.666014 5010 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.666241 5010 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-09 08:39:35.122775591 +0000 UTC Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.666363 5010 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 1049h13m15.456417499s for next certificate rotation Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.693342 5010 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.696308 5010 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.718127 5010 log.go:25] "Validated CRI v1 runtime API" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.756944 5010 log.go:25] "Validated CRI v1 image API" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.759192 5010 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.765833 5010 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-15-21-06-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.765903 5010 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs 
blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.788110 5010 manager.go:217] Machine: {Timestamp:2025-11-26 15:26:19.785046764 +0000 UTC m=+0.575763942 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:acbf26fa-40c0-4dfa-8770-e9f2cef78fa9 BootID:21620236-c00a-4f13-9fac-891f828aea35 Filesystems:[{Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:cd:d1:52 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:cd:d1:52 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:a4:d0:43 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:74:b7:22 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:31:6e:b9 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:9a:06:0f Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:c3:42:c7 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:9a:1c:14:65:d6:1e Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:56:61:56:73:3a:a2 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] 
UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.788486 5010 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. 
Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.788978 5010 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.790584 5010 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.790801 5010 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.790863 5010 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.791107 5010 topology_manager.go:138] "Creating topology manager with none policy" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.791119 5010 container_manager_linux.go:303] "Creating device plugin manager" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.791613 5010 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.791656 5010 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.792398 5010 state_mem.go:36] "Initialized new in-memory state store" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.792986 5010 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.795742 5010 kubelet.go:418] "Attempting to sync node with API server" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.795772 5010 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" 
Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.795796 5010 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.795823 5010 kubelet.go:324] "Adding apiserver pod source" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.795838 5010 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.801135 5010 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.802538 5010 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.805535 5010 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.806121 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.806282 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.806101 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.806344 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807458 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807512 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807532 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807546 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807571 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807585 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807599 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807621 5010 plugins.go:603] "Loaded volume plugin" 
pluginName="kubernetes.io/downward-api" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807683 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807701 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807764 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.807782 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.808894 5010 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.809867 5010 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.810000 5010 server.go:1280] "Started kubelet" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.811074 5010 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.811073 5010 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.816560 5010 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 26 15:26:19 crc systemd[1]: Started Kubernetes Kubelet. Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.824009 5010 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.824092 5010 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.823388 5010 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.154:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b97fa2db2246e default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 15:26:19.809948782 +0000 UTC m=+0.600665970,LastTimestamp:2025-11-26 15:26:19.809948782 +0000 UTC m=+0.600665970,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.825019 5010 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.828630 5010 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.828700 5010 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.828728 5010 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.829100 5010 certificate_manager.go:356] 
kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 21:00:30.068244079 +0000 UTC Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.829221 5010 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 677h34m10.239030168s for next certificate rotation Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.829406 5010 server.go:460] "Adding debug handlers to kubelet server" Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.829465 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="200ms" Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.829568 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.829653 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.829960 5010 factory.go:55] Registering systemd factory Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.829986 5010 factory.go:221] Registration of the systemd container factory successfully Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.830430 5010 factory.go:153] Registering CRI-O factory Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.830453 5010 factory.go:221] Registration of the crio container factory successfully Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.830540 5010 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.830576 5010 factory.go:103] Registering Raw factory Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.830605 5010 manager.go:1196] Started watching for new ooms in manager Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.832502 5010 manager.go:319] Starting recovery of all containers Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.848407 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854625 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854786 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854831 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854861 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854885 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854921 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.854967 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855008 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855032 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855063 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855090 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855117 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855153 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" 
volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855221 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855273 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855323 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855353 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855377 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855406 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855435 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855461 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855497 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855524 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855555 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855579 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855682 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855736 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855768 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855792 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855824 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855851 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855876 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855914 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855944 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855973 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.855999 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856024 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856053 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856076 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856107 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856132 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856160 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856207 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856290 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856325 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856371 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" 
volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856401 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856435 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856461 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.856491 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.859935 5010 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860029 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860068 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860091 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860112 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860130 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860151 5010 reconstruct.go:130] "Volume is 
marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860169 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860183 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860199 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860214 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860230 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860248 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860262 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860320 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860337 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860353 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860370 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860386 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860402 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860418 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860433 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860462 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860478 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860493 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860510 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860527 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860546 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860564 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860580 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860596 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860615 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860634 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860651 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860668 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860682 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860698 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860733 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860750 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860766 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" 
volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860782 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860797 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860812 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860827 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860844 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860860 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860893 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860913 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860910 5010 manager.go:324] Recovery completed Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860930 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860947 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860963 5010 reconstruct.go:130] "Volume is marked as uncertain and 
added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860980 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.860998 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861014 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861044 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861067 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861091 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861110 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861127 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861143 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861161 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861176 5010 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861194 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861210 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861239 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861255 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861270 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861285 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861300 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861315 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861330 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861345 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861364 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861380 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861397 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861413 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861429 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861444 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861461 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861479 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861496 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861513 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861527 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861543 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861558 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861574 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861590 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861605 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861620 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861635 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861650 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861665 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861682 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861699 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861833 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861850 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861866 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861883 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861900 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861917 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861932 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861949 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861964 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861980 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.861996 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862012 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862027 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862044 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862061 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862076 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862092 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862107 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862123 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862138 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862154 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862169 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862186 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862203 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862218 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862234 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862248 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862263 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862278 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862294 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862311 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862326 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862341 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862355 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862370 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862385 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862401 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862415 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862430 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862445 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862460 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862474 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862496 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862513 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862528 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862544 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862561 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862576 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862591 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862606 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862622 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862637 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862655 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862671 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862687 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862704 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862737 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862753 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862767 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862782 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862797 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862813 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862831 5010 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862844 5010 reconstruct.go:97] "Volume reconstruction finished" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.862854 5010 reconciler.go:26] "Reconciler: start to sync state" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.875590 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.882431 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.882533 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.882572 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.884250 5010 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.884270 5010 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 
15:26:19.884294 5010 state_mem.go:36] "Initialized new in-memory state store" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.887240 5010 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.890170 5010 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv6" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.890256 5010 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.890302 5010 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.890384 5010 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 15:26:19 crc kubenswrapper[5010]: W1126 15:26:19.892069 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.892148 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.899829 5010 policy_none.go:49] "None policy: Start" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.900775 5010 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.900857 5010 state_mem.go:35] "Initializing new in-memory state store" Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.925938 5010 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.953056 5010 manager.go:334] "Starting Device Plugin manager" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.953143 5010 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.953172 5010 server.go:79] "Starting device plugin registration server" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.953949 5010 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.953983 5010 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.954219 5010 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.954356 5010 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.954376 5010 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 15:26:19 crc kubenswrapper[5010]: E1126 15:26:19.965684 5010 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.991311 5010 
kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.991530 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.993505 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.993621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.993641 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.994075 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.994277 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.994355 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995365 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995419 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995433 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995673 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995736 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995749 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995878 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.995932 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.996671 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.996739 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.996753 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.996943 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.997132 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.997192 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.997130 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.997287 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.997314 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998143 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998184 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998202 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998349 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998504 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998604 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998678 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998699 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.998796 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.999429 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.999480 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.999505 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:19 crc kubenswrapper[5010]: I1126 15:26:19.999930 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.000004 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.000562 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.000614 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.000637 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.001780 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.001978 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.002023 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.030428 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="400ms" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.054317 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.056033 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.056092 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.056111 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:20 crc 
kubenswrapper[5010]: I1126 15:26:20.056145 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.056865 5010 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.154:6443: connect: connection refused" node="crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.066611 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.066668 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.066738 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.066821 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.066942 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067048 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067149 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067214 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067253 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067288 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067320 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067383 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067479 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067529 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.067561 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168479 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168545 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168579 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168658 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168701 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168800 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168827 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168823 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168925 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168844 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168971 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: 
I1126 15:26:20.168922 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168917 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168847 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169087 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169121 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169127 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.168912 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169153 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169196 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169262 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169232 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169179 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169358 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169316 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169443 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169484 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169497 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.169593 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.257820 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.260112 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.260167 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.260187 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 
15:26:20.260223 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.260945 5010 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.154:6443: connect: connection refused" node="crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.329472 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.354190 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.368645 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.382460 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-039bc8318800a8468a218142a3d1478a436950e3720e0497cd8d3334657478be WatchSource:0}: Error finding container 039bc8318800a8468a218142a3d1478a436950e3720e0497cd8d3334657478be: Status 404 returned error can't find the container with id 039bc8318800a8468a218142a3d1478a436950e3720e0497cd8d3334657478be Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.394189 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.400367 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-4a47e113e9fe1b426392562c8ac9b88726258b301dd6ebf9834615bb5be24991 WatchSource:0}: Error finding container 4a47e113e9fe1b426392562c8ac9b88726258b301dd6ebf9834615bb5be24991: Status 404 returned error can't find the container with id 4a47e113e9fe1b426392562c8ac9b88726258b301dd6ebf9834615bb5be24991 Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.404100 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.416072 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-113cf9b82f17b9f9a76cc6e7e77443493d1bec048921cf26bab32d6904040d26 WatchSource:0}: Error finding container 113cf9b82f17b9f9a76cc6e7e77443493d1bec048921cf26bab32d6904040d26: Status 404 returned error can't find the container with id 113cf9b82f17b9f9a76cc6e7e77443493d1bec048921cf26bab32d6904040d26 Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.418294 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-29a43bfc2a31bd01002fa12f319b8f9e6147266777c034526aaead1709e0e758 WatchSource:0}: Error finding container 29a43bfc2a31bd01002fa12f319b8f9e6147266777c034526aaead1709e0e758: Status 404 returned error can't find the container with id 29a43bfc2a31bd01002fa12f319b8f9e6147266777c034526aaead1709e0e758 Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.427816 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-cda1c7b5dfa985f6d16e6d10d349a2de9aa68ee9b4dda73a68f7564088a2c127 WatchSource:0}: Error finding container cda1c7b5dfa985f6d16e6d10d349a2de9aa68ee9b4dda73a68f7564088a2c127: Status 404 returned error can't find the container with id cda1c7b5dfa985f6d16e6d10d349a2de9aa68ee9b4dda73a68f7564088a2c127 Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.431660 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="800ms" Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.490348 5010 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.154:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b97fa2db2246e default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 15:26:19.809948782 +0000 UTC m=+0.600665970,LastTimestamp:2025-11-26 15:26:19.809948782 +0000 UTC m=+0.600665970,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.661605 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.663769 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.663814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.663826 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.663861 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.664433 5010 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.154:6443: connect: connection refused" node="crc" Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.675878 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.675964 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.698059 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.698180 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.812046 5010 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:20 crc kubenswrapper[5010]: W1126 15:26:20.840642 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:20 crc kubenswrapper[5010]: E1126 15:26:20.840790 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.896225 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"29a43bfc2a31bd01002fa12f319b8f9e6147266777c034526aaead1709e0e758"} Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.897871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"113cf9b82f17b9f9a76cc6e7e77443493d1bec048921cf26bab32d6904040d26"} Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.899973 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4a47e113e9fe1b426392562c8ac9b88726258b301dd6ebf9834615bb5be24991"} Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.901338 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"039bc8318800a8468a218142a3d1478a436950e3720e0497cd8d3334657478be"} Nov 26 15:26:20 crc kubenswrapper[5010]: I1126 15:26:20.904757 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cda1c7b5dfa985f6d16e6d10d349a2de9aa68ee9b4dda73a68f7564088a2c127"} Nov 26 15:26:21 crc kubenswrapper[5010]: E1126 15:26:21.233525 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="1.6s" Nov 26 15:26:21 crc kubenswrapper[5010]: W1126 15:26:21.361962 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:21 crc kubenswrapper[5010]: E1126 15:26:21.362124 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.464916 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.467833 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.467920 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.467949 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.468003 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:21 crc kubenswrapper[5010]: E1126 15:26:21.468797 5010 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.154:6443: connect: connection refused" node="crc" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.811766 5010 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.154:6443: connect: 
connection refused Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.911786 5010 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256" exitCode=0 Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.911925 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256"} Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.911936 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.913477 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.913545 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.913571 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.916656 5010 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e" exitCode=0 Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.916769 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e"} Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.916855 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.918334 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.918396 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.918423 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.920971 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e"} Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.921042 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45"} Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.921066 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c"} Nov 26 15:26:21 
crc kubenswrapper[5010]: I1126 15:26:21.924859 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8" exitCode=0 Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.925160 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.925611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8"} Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.927029 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.927084 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.927104 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.929108 5010 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e" exitCode=0 Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.929278 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e"} Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.929300 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.931174 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.931485 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.931546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.931600 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.941010 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.941076 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:21 crc kubenswrapper[5010]: I1126 15:26:21.941089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:22 crc kubenswrapper[5010]: W1126 15:26:22.540038 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:22 crc kubenswrapper[5010]: E1126 15:26:22.540145 5010 reflector.go:158] "Unhandled 
Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.811059 5010 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:22 crc kubenswrapper[5010]: E1126 15:26:22.838746 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="3.2s" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.936972 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.937027 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.937037 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.937194 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.938911 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.938949 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.938958 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.941787 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.941941 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.943214 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.943255 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.943266 5010 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.946971 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.947051 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.947072 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.949268 5010 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d" exitCode=0 Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.949329 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.949338 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.950422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.950459 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.950472 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.952954 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"942d958bdc0e38ec0d1362ab378f5623b67782d05dce2a4cc4fcc0a41220636f"} Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.953160 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.964564 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.964646 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:22 crc kubenswrapper[5010]: I1126 15:26:22.964837 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:22 crc kubenswrapper[5010]: W1126 15:26:22.966860 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: 
connect: connection refused Nov 26 15:26:22 crc kubenswrapper[5010]: E1126 15:26:22.966975 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.068976 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.070865 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.070926 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.070936 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.070966 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:23 crc kubenswrapper[5010]: E1126 15:26:23.071977 5010 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.154:6443: connect: connection refused" node="crc" Nov 26 15:26:23 crc kubenswrapper[5010]: W1126 15:26:23.367111 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:23 crc kubenswrapper[5010]: E1126 15:26:23.367223 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:23 crc kubenswrapper[5010]: W1126 15:26:23.457668 5010 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.154:6443: connect: connection refused Nov 26 15:26:23 crc kubenswrapper[5010]: E1126 15:26:23.457820 5010 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.154:6443: connect: connection refused" logger="UnhandledError" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.959445 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b"} Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.959517 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611"} Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.959680 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.960816 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.960862 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.960875 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.964052 5010 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe" exitCode=0 Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.964162 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.964196 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.964746 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.965126 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe"} Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.965219 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.965609 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.966428 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.966460 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.966472 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967056 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967138 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967180 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967072 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967248 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" 
Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967265 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967098 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:23 crc kubenswrapper[5010]: I1126 15:26:23.967372 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.016470 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.054249 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.663647 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.675845 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.972755 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.973277 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207"} Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.973381 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.973406 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7"} Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.973895 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3"} Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.973983 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.974220 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.977878 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.977977 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.978004 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.978000 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.978864 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.978888 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.978916 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.978943 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:24 crc kubenswrapper[5010]: I1126 15:26:24.979057 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.238918 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.319147 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.796660 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.983198 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd"} Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.983275 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc"} Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.983276 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.983372 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.983376 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985078 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985123 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985132 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985169 5010 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985188 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985264 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985312 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:25 crc kubenswrapper[5010]: I1126 15:26:25.985329 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.272600 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.274689 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.274767 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.274785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.274819 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.413550 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.986106 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.986157 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.986209 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987376 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987446 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987465 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987569 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987604 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987615 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987887 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987939 5010 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:26 crc kubenswrapper[5010]: I1126 15:26:26.987957 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.016772 5010 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.016877 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.086681 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.989404 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.989573 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.991085 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.991132 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.991150 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.991192 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.991215 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:27 crc kubenswrapper[5010]: I1126 15:26:27.991227 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:29 crc kubenswrapper[5010]: I1126 15:26:29.450897 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 26 15:26:29 crc kubenswrapper[5010]: I1126 15:26:29.451263 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:29 crc kubenswrapper[5010]: I1126 15:26:29.453387 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:29 crc kubenswrapper[5010]: I1126 15:26:29.453446 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:29 crc kubenswrapper[5010]: I1126 15:26:29.453465 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:29 crc kubenswrapper[5010]: E1126 15:26:29.965836 5010 
eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 15:26:33 crc kubenswrapper[5010]: I1126 15:26:33.582199 5010 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 26 15:26:33 crc kubenswrapper[5010]: I1126 15:26:33.583814 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 26 15:26:33 crc kubenswrapper[5010]: I1126 15:26:33.811174 5010 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 26 15:26:34 crc kubenswrapper[5010]: I1126 15:26:34.860858 5010 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 15:26:34 crc kubenswrapper[5010]: I1126 15:26:34.860948 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 15:26:34 crc kubenswrapper[5010]: I1126 15:26:34.875368 5010 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 15:26:34 crc kubenswrapper[5010]: I1126 15:26:34.875697 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.248807 5010 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]log ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]etcd ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/openshift.io-api-request-count-filter ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/openshift.io-startkubeinformers ok Nov 26 15:26:35 crc kubenswrapper[5010]: 
[+]poststarthook/openshift.io-openshift-apiserver-reachable ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/openshift.io-oauth-apiserver-reachable ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/generic-apiserver-start-informers ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/priority-and-fairness-config-consumer ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/priority-and-fairness-filter ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-apiextensions-informers ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-apiextensions-controllers ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/crd-informer-synced ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-system-namespaces-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-cluster-authentication-info-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-kube-apiserver-identity-lease-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-kube-apiserver-identity-lease-garbage-collector ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-legacy-token-tracking-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-service-ip-repair-controllers ok Nov 26 15:26:35 crc kubenswrapper[5010]: [-]poststarthook/rbac/bootstrap-roles failed: reason withheld Nov 26 15:26:35 crc kubenswrapper[5010]: [-]poststarthook/scheduling/bootstrap-system-priority-classes failed: reason withheld Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/priority-and-fairness-config-producer ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/bootstrap-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/aggregator-reload-proxy-client-cert ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/start-kube-aggregator-informers ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-status-local-available-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-status-remote-available-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-registration-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-wait-for-first-sync ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-discovery-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/kube-apiserver-autoregistration ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]autoregister-completion ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-openapi-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: [+]poststarthook/apiservice-openapiv3-controller ok Nov 26 15:26:35 crc kubenswrapper[5010]: livez check failed Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.248894 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.804190 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.804437 5010 kubelet_node_status.go:401] "Setting 
node annotation to enable volume controller attach/detach" Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.807050 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.807174 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:35 crc kubenswrapper[5010]: I1126 15:26:35.807193 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:37 crc kubenswrapper[5010]: I1126 15:26:37.018273 5010 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 15:26:37 crc kubenswrapper[5010]: I1126 15:26:37.019026 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.483257 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.483493 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.484872 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.484959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.484987 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.500161 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 26 15:26:39 crc kubenswrapper[5010]: E1126 15:26:39.869073 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.871879 5010 trace.go:236] Trace[1324458521]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 15:26:29.338) (total time: 10533ms): Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[1324458521]: ---"Objects listed" error: 10532ms (15:26:39.871) Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[1324458521]: [10.53300562s] [10.53300562s] END Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.872213 5010 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.872009 5010 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.872097 
5010 trace.go:236] Trace[465666213]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 15:26:26.207) (total time: 13664ms): Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[465666213]: ---"Objects listed" error: 13664ms (15:26:39.871) Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[465666213]: [13.664937581s] [13.664937581s] END Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.872469 5010 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.872620 5010 trace.go:236] Trace[1461656910]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 15:26:28.291) (total time: 11581ms): Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[1461656910]: ---"Objects listed" error: 11581ms (15:26:39.872) Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[1461656910]: [11.581088049s] [11.581088049s] END Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.872648 5010 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 15:26:39 crc kubenswrapper[5010]: E1126 15:26:39.876861 5010 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.877520 5010 trace.go:236] Trace[113224201]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 15:26:26.192) (total time: 13684ms): Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[113224201]: ---"Objects listed" error: 13684ms (15:26:39.877) Nov 26 15:26:39 crc kubenswrapper[5010]: Trace[113224201]: [13.684457623s] [13.684457623s] END Nov 26 15:26:39 crc kubenswrapper[5010]: I1126 15:26:39.877789 5010 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.030854 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.033285 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b" exitCode=255 Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.033380 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b"} Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.187215 5010 scope.go:117] "RemoveContainer" containerID="f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.243568 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.409291 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.804939 5010 apiserver.go:52] "Watching apiserver" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.807596 5010 reflector.go:368] Caches populated for 
*v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.808034 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.808407 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.808506 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.808633 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.808862 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.808925 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.808974 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.809004 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.809039 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.809130 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.814477 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.814808 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.815107 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.815495 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.815519 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.815523 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.815659 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.816655 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.820301 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.829513 5010 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.842814 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.856572 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.867060 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878358 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878417 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878445 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878471 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878491 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878516 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878541 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 15:26:40 crc kubenswrapper[5010]: 
I1126 15:26:40.878564 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878591 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878609 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878629 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878650 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878672 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878693 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878730 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878750 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878770 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 
15:26:40.878797 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878815 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878836 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878856 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878877 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878871 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878898 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878919 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878942 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.878966 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879058 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879080 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879096 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879104 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879126 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879142 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879139 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879159 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879176 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879193 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879211 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879229 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879245 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879264 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879286 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879300 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879306 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879340 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879341 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879351 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879360 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879405 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879428 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879447 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879466 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879483 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879500 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879516 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879519 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879536 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879547 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879558 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879577 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879597 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879608 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879615 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879632 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879650 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879666 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879682 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879708 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879741 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879752 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879760 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879781 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879796 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879837 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879865 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879892 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879913 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879932 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879950 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879968 5010 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879985 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880002 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880026 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880047 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880068 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880086 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880109 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880126 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880143 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 
15:26:40.880161 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880176 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880273 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880293 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880312 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880334 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880357 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880374 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880392 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880410 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: 
\"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880427 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880443 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880460 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880481 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880497 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880514 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880532 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880548 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880568 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880586 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod 
\"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880601 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880637 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880653 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880671 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880689 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880710 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880751 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880770 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880792 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.879948 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880003 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880039 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880075 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880135 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880238 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880301 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880329 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880384 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880406 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887256 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880463 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880476 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880575 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880612 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880634 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880651 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880740 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880760 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.880823 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:26:41.380795266 +0000 UTC m=+22.171512414 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887404 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887434 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887447 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887457 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887481 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887507 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887526 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887560 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887579 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887598 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887620 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887676 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880863 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880898 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880893 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880892 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.888690 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880949 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880979 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.881047 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.881151 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.881186 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.881234 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.881513 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.885029 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.885412 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.885755 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886236 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886291 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886417 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886542 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886621 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886761 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). 
InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.886796 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887097 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887272 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887844 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.880838 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887876 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.888102 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.888339 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.888856 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.889097 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.889276 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.889291 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.889461 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.889466 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.887648 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.890598 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.890938 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891012 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891078 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891153 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891222 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891288 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891359 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891422 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: 
\"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891486 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891614 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891688 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891778 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891842 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891909 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.891992 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892063 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892126 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892196 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod 
\"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892263 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892335 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892403 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892472 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892542 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892608 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892683 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892793 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892867 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.892935 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893003 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893073 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893143 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893213 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893284 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893355 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893440 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893514 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893585 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893657 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893761 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893842 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893914 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893983 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894052 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894120 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894192 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894260 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894327 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 
15:26:40.894409 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894481 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894554 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894630 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894703 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894797 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894871 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.894943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895014 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895081 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895280 5010 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895372 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895455 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895528 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895596 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895667 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895753 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895831 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895912 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895979 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896084 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896159 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896228 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896298 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896374 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896455 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896521 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896593 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896662 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896747 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896824 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896924 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896996 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897074 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897155 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897268 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897344 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897433 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897517 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897595 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897669 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897761 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897843 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897987 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898114 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898177 5010 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898236 5010 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898294 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898356 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898433 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898496 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898555 5010 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898611 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898668 5010 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898747 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898811 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898868 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898923 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.898975 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899033 5010 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899089 5010 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899145 5010 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899199 5010 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899258 5010 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899311 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899369 5010 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899428 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899486 5010 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899544 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899609 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899663 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899812 5010 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899871 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899932 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899990 5010 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900061 5010 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900122 5010 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900178 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900228 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900283 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900345 5010 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900400 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900457 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900571 5010 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900624 5010 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900681 5010 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" 
DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900752 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902238 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902266 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902330 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902346 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902361 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902375 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902389 5010 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902401 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902413 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902425 5010 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902438 5010 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902448 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" 
DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902462 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902473 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902484 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902495 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902507 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902519 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902530 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893006 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.893875 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895080 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897582 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{
\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\
":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903526 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903804 5010 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904399 5010 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895497 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.895662 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896334 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.896990 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897271 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897331 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897584 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897864 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.897964 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900405 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900489 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900695 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900853 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.900906 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901006 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901096 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901613 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901668 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901682 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901942 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.901959 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902040 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902176 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902488 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902537 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902568 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902630 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902758 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902795 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903009 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.902943 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903160 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903236 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903286 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903385 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903400 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903438 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903872 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903886 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904184 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904383 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904421 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.904558 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904600 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904764 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.904843 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905228 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905240 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905608 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.899343 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905854 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905892 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.905916 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906032 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906234 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906247 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906295 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.903456 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906508 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906632 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906688 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906690 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906815 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.906919 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907040 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907055 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907391 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907536 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907597 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907591 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907903 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.907971 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.908010 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:41.407991908 +0000 UTC m=+22.198709056 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908068 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.908323 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908452 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.908469 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:41.408460409 +0000 UTC m=+22.199177557 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908862 5010 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908876 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908900 5010 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908909 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.908921 5010 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.909074 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.909127 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.909226 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.911463 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.913544 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.913947 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.914034 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.914107 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.914209 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:41.414198461 +0000 UTC m=+22.204915609 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.913964 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.914861 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.915132 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.916098 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.916102 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.916884 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.923392 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.924222 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.928908 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.929151 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.929208 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.929215 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.929238 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:40 crc kubenswrapper[5010]: E1126 15:26:40.929309 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:41.429284564 +0000 UTC m=+22.220001702 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.929350 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.930323 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.930744 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.930893 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931068 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931238 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931485 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931895 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931941 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931973 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.932012 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.932283 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.932575 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.932833 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.931850 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.933199 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.933844 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.933839 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.933991 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.934394 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.934268 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.934446 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.934822 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.934900 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.935154 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.935516 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937424 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937471 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937450 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937475 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937577 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937628 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.937902 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.938276 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.938700 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.940209 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.948129 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.952173 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.957609 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.961929 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.965730 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:26:40 crc kubenswrapper[5010]: I1126 15:26:40.970983 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserv
er-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009627 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009779 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009837 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009855 5010 reconciler_common.go:293] 
"Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009870 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009885 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009901 5010 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009915 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009928 5010 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009942 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009957 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009971 5010 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009985 5010 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.009999 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010014 5010 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010157 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010195 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010256 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010424 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010452 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010468 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010482 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010498 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010514 5010 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010529 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010545 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010561 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010576 5010 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010653 5010 
reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010687 5010 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010709 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010746 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010761 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010775 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010788 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010802 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010816 5010 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010829 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010859 5010 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010901 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010916 5010 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010931 5010 reconciler_common.go:293] 
"Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010946 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010984 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.010999 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011013 5010 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011026 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011058 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011071 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011085 5010 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011098 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011111 5010 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011143 5010 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011155 5010 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc 
kubenswrapper[5010]: I1126 15:26:41.011168 5010 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011180 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011192 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011225 5010 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011236 5010 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011248 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011262 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011295 5010 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011307 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011320 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011334 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011347 5010 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011380 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011393 5010 
reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011406 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011418 5010 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011431 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011468 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011481 5010 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011492 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011507 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011611 5010 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011626 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011638 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011652 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011683 5010 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011696 
5010 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011732 5010 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011745 5010 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011759 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011771 5010 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011784 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011819 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011831 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011844 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011856 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011897 5010 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011910 5010 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011923 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 
15:26:41.011937 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011970 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011984 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.011997 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012011 5010 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012024 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012057 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012070 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012082 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012095 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012108 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012140 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012153 5010 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012166 5010 reconciler_common.go:293] "Volume detached for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012178 5010 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012211 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012223 5010 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012236 5010 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012248 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012260 5010 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012293 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012306 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012319 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012331 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012344 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012377 5010 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012390 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: 
\"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012402 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012413 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012426 5010 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012463 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012476 5010 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012488 5010 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012500 5010 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012535 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.012550 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.041327 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.044520 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429"} Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.054666 5010 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-apiserver-crc\" already exists" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.056354 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to 
patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\
\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.058620 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.070598 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.082407 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.093146 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.114860 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runn
ing\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d3147235
9c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.127178 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.127178 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.133261 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.139838 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.140058 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 15:26:41 crc kubenswrapper[5010]: W1126 15:26:41.145141 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-94a1d68e7cb4e0c31c16370ddd0412e26f9bcc2fce233324e5204ca0490d3596 WatchSource:0}: Error finding container 94a1d68e7cb4e0c31c16370ddd0412e26f9bcc2fce233324e5204ca0490d3596: Status 404 returned error can't find the container with id 94a1d68e7cb4e0c31c16370ddd0412e26f9bcc2fce233324e5204ca0490d3596 Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.158069 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.158580 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-5mb6f"] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.159100 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-kt7rg"] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.159425 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.159532 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.165336 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.165739 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.165866 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.165909 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.166096 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.166149 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.166291 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.166750 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.178528 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.191457 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.202550 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.214497 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.226138 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.237614 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.253117 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.271848 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\
":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri
-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.283617 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.293114 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.315885 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.316173 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/a6b0e322-9296-4356-9e3b-6497381eb30d-rootfs\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.316232 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjf7z\" (UniqueName: \"kubernetes.io/projected/a6b0e322-9296-4356-9e3b-6497381eb30d-kube-api-access-qjf7z\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.316295 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6838c72e-3f88-4cb6-91e6-954cf7d9fef3-hosts-file\") pod \"node-resolver-5mb6f\" (UID: \"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\") " pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.316404 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mhss\" (UniqueName: \"kubernetes.io/projected/6838c72e-3f88-4cb6-91e6-954cf7d9fef3-kube-api-access-6mhss\") pod \"node-resolver-5mb6f\" (UID: \"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\") " pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.316452 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a6b0e322-9296-4356-9e3b-6497381eb30d-proxy-tls\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.316485 
5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6b0e322-9296-4356-9e3b-6497381eb30d-mcd-auth-proxy-config\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.329473 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.341853 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.358247 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.371211 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.379566 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.393448 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state
\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.404972 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.417271 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.417364 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.417410 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mhss\" (UniqueName: \"kubernetes.io/projected/6838c72e-3f88-4cb6-91e6-954cf7d9fef3-kube-api-access-6mhss\") pod \"node-resolver-5mb6f\" (UID: \"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\") " pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.417434 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a6b0e322-9296-4356-9e3b-6497381eb30d-proxy-tls\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.417454 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6b0e322-9296-4356-9e3b-6497381eb30d-mcd-auth-proxy-config\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417537 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:26:42.417496293 +0000 UTC m=+23.208213481 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417564 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417619 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:42.417604905 +0000 UTC m=+23.208322053 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417847 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417880 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417901 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.417970 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:42.417954864 +0000 UTC m=+23.208672042 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418015 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418057 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/a6b0e322-9296-4356-9e3b-6497381eb30d-rootfs\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418085 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjf7z\" (UniqueName: \"kubernetes.io/projected/a6b0e322-9296-4356-9e3b-6497381eb30d-kube-api-access-qjf7z\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418107 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418124 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6838c72e-3f88-4cb6-91e6-954cf7d9fef3-hosts-file\") pod \"node-resolver-5mb6f\" (UID: \"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\") " pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.418222 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.418257 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:42.418247441 +0000 UTC m=+23.208964589 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418294 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/a6b0e322-9296-4356-9e3b-6497381eb30d-rootfs\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418382 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6b0e322-9296-4356-9e3b-6497381eb30d-mcd-auth-proxy-config\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.418392 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/6838c72e-3f88-4cb6-91e6-954cf7d9fef3-hosts-file\") pod \"node-resolver-5mb6f\" (UID: \"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\") " pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.424574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a6b0e322-9296-4356-9e3b-6497381eb30d-proxy-tls\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.451859 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mhss\" (UniqueName: \"kubernetes.io/projected/6838c72e-3f88-4cb6-91e6-954cf7d9fef3-kube-api-access-6mhss\") pod \"node-resolver-5mb6f\" (UID: \"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\") " pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.453303 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjf7z\" (UniqueName: \"kubernetes.io/projected/a6b0e322-9296-4356-9e3b-6497381eb30d-kube-api-access-qjf7z\") pod \"machine-config-daemon-kt7rg\" (UID: \"a6b0e322-9296-4356-9e3b-6497381eb30d\") " pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.481187 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.490086 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-5mb6f" Nov 26 15:26:41 crc kubenswrapper[5010]: W1126 15:26:41.494611 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6b0e322_9296_4356_9e3b_6497381eb30d.slice/crio-4efa0200b55e11d03f7d2d31b1d76397e5cfddb459ff043ac1f1fc311f2292b0 WatchSource:0}: Error finding container 4efa0200b55e11d03f7d2d31b1d76397e5cfddb459ff043ac1f1fc311f2292b0: Status 404 returned error can't find the container with id 4efa0200b55e11d03f7d2d31b1d76397e5cfddb459ff043ac1f1fc311f2292b0 Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.519546 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.519693 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.519750 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.519765 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:41 crc kubenswrapper[5010]: E1126 15:26:41.519812 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:42.519797291 +0000 UTC m=+23.310514449 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.533630 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-94lzp"] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.534173 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.536515 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.536580 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.536944 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.537185 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-sc4tv"] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.537457 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.537936 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.541457 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.541561 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.541650 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.574746 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.593704 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.608853 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620818 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-cnibin\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620851 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-hostroot\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620887 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-cni-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620908 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-k8s-cni-cncf-io\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620925 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-cni-bin\") pod 
\"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620939 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-cni-multus\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.620956 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-kubelet\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621160 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-multus-certs\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621208 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-socket-dir-parent\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621234 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-conf-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621262 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0a5a476f-6c13-4c62-8042-d9b37846aa18-cni-binary-copy\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621279 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v67mh\" (UniqueName: \"kubernetes.io/projected/0a5a476f-6c13-4c62-8042-d9b37846aa18-kube-api-access-v67mh\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621295 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-daemon-config\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621332 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-system-cni-dir\") pod 
\"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621419 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-os-release\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621500 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-netns\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.621519 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-etc-kubernetes\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.635033 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.650649 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.661384 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.685187 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.698340 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.707801 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.717175 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 
26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722149 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-os-release\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722184 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-etc-kubernetes\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722202 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-hostroot\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722227 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-k8s-cni-cncf-io\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722250 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-cni-bin\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722267 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-cni-multus\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722292 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-os-release\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722312 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722330 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0a5a476f-6c13-4c62-8042-d9b37846aa18-cni-binary-copy\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722345 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-conf-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722363 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-daemon-config\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722354 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-hostroot\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722384 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-etc-kubernetes\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722381 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722461 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-cni-bin\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722498 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-cni-multus\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722555 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtczx\" (UniqueName: \"kubernetes.io/projected/56f2d574-eefa-4be0-bf3f-aff08053f4e8-kube-api-access-xtczx\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722517 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-k8s-cni-cncf-io\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722566 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-conf-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722643 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-system-cni-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723054 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-netns\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722663 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-os-release\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723130 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-netns\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723137 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-cnibin\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.722734 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-system-cni-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723164 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-cni-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723189 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-cnibin\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723242 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/0a5a476f-6c13-4c62-8042-d9b37846aa18-cni-binary-copy\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723270 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" 
(UniqueName: \"kubernetes.io/configmap/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-daemon-config\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723316 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-cni-dir\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723235 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-kubelet\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723364 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-var-lib-kubelet\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723370 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cnibin\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723464 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cni-binary-copy\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723515 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-multus-certs\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723540 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-socket-dir-parent\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723561 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-host-run-multus-certs\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723564 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-system-cni-dir\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723615 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/0a5a476f-6c13-4c62-8042-d9b37846aa18-multus-socket-dir-parent\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.723621 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v67mh\" (UniqueName: \"kubernetes.io/projected/0a5a476f-6c13-4c62-8042-d9b37846aa18-kube-api-access-v67mh\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.734209 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.743569 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.752367 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.763771 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin 
routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"last
State\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.773902 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.784408 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.800060 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c
8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.802125 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v67mh\" (UniqueName: \"kubernetes.io/projected/0a5a476f-6c13-4c62-8042-d9b37846aa18-kube-api-access-v67mh\") pod \"multus-94lzp\" (UID: \"0a5a476f-6c13-4c62-8042-d9b37846aa18\") " pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.813248 5010 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824101 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-os-release\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824272 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824332 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824378 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-xtczx\" (UniqueName: \"kubernetes.io/projected/56f2d574-eefa-4be0-bf3f-aff08053f4e8-kube-api-access-xtczx\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824469 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cnibin\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824523 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cni-binary-copy\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824553 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-system-cni-dir\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824662 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-system-cni-dir\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.824820 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-os-release\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.825779 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-tuning-conf-dir\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.825885 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cnibin\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.826041 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 
15:26:41.826696 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/56f2d574-eefa-4be0-bf3f-aff08053f4e8-cni-binary-copy\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.827252 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.838487 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.845973 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtczx\" (UniqueName: \"kubernetes.io/projected/56f2d574-eefa-4be0-bf3f-aff08053f4e8-kube-api-access-xtczx\") pod \"multus-additional-cni-plugins-sc4tv\" (UID: \"56f2d574-eefa-4be0-bf3f-aff08053f4e8\") " pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.850504 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.860137 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.861201 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-94lzp" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.870574 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 15:26:41 crc kubenswrapper[5010]: W1126 15:26:41.872297 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0a5a476f_6c13_4c62_8042_d9b37846aa18.slice/crio-a2cf9e9ce189da86890b5505f3ee266b95a25f51d5f25ab9b39eed25f973f0f4 WatchSource:0}: Error finding container a2cf9e9ce189da86890b5505f3ee266b95a25f51d5f25ab9b39eed25f973f0f4: Status 404 returned error can't find the container with id a2cf9e9ce189da86890b5505f3ee266b95a25f51d5f25ab9b39eed25f973f0f4 Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.880970 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" Nov 26 15:26:41 crc kubenswrapper[5010]: W1126 15:26:41.893492 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56f2d574_eefa_4be0_bf3f_aff08053f4e8.slice/crio-c163855dedd364c617d0452f35bf8eba10e8bdb44408f12503caf5cd2b08cc32 WatchSource:0}: Error finding container c163855dedd364c617d0452f35bf8eba10e8bdb44408f12503caf5cd2b08cc32: Status 404 returned error can't find the container with id c163855dedd364c617d0452f35bf8eba10e8bdb44408f12503caf5cd2b08cc32 Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.896021 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.896761 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.898620 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.899664 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.900502 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.901779 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.902668 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.904050 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.905040 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.906385 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.907190 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.908854 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" 
path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.909545 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.910424 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.911739 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.912897 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.913838 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.914436 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.915929 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.916867 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.917611 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.919126 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.919822 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.921342 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.921994 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.923580 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" 
path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.924677 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.925441 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.927085 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.927835 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.928527 5010 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.929223 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.932511 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.933555 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.935020 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.938135 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.939122 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.940403 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.941065 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.942458 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" 
path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.943013 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.944331 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.945407 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.946384 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.946937 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.947837 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.948603 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.949370 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.950251 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.950826 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.951316 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.952682 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.953404 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.954421 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" 
path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.955029 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hlqt9"] Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.955942 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.959500 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.959567 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.959582 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.959791 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.960141 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.968116 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 15:26:41 crc kubenswrapper[5010]: I1126 15:26:41.987759 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.016498 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.050424 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-5mb6f" event={"ID":"6838c72e-3f88-4cb6-91e6-954cf7d9fef3","Type":"ContainerStarted","Data":"6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.050542 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-5mb6f" event={"ID":"6838c72e-3f88-4cb6-91e6-954cf7d9fef3","Type":"ContainerStarted","Data":"acb7a12245e577e73025ab1357d810084216ff01a2619bfa4667cf83fd9f6564"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.054080 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.054112 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.054123 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"4efa0200b55e11d03f7d2d31b1d76397e5cfddb459ff043ac1f1fc311f2292b0"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.055117 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.055524 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.055550 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"94a1d68e7cb4e0c31c16370ddd0412e26f9bcc2fce233324e5204ca0490d3596"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.056451 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerStarted","Data":"c163855dedd364c617d0452f35bf8eba10e8bdb44408f12503caf5cd2b08cc32"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.058027 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerStarted","Data":"263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.058054 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerStarted","Data":"a2cf9e9ce189da86890b5505f3ee266b95a25f51d5f25ab9b39eed25f973f0f4"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.058880 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"0d66a16e2978e95dc31894d988b2e8ef9113cb9a79753500ca26b4ed79b57dbf"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.061562 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.061629 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.061648 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"dee4ec2070c48dd6dabd615643be0f03f77958c0170ee2809c0481d3e5c9773e"} Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.061908 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.098014 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129016 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-log-socket\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129069 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-bin\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129089 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-node-log\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129129 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129150 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-script-lib\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129178 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-ovn\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129278 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-ovn-kubernetes\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129369 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-systemd\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129403 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 
15:26:42.129426 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-config\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129458 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-slash\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129494 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-kubelet\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129514 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxqgl\" (UniqueName: \"kubernetes.io/projected/f10d9600-fac2-43e9-ad75-91b3c1f5b749-kube-api-access-qxqgl\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129552 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-systemd-units\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129573 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-netd\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129605 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovn-node-metrics-cert\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129622 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-var-lib-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129640 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-netns\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129658 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-etc-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.129679 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-env-overrides\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.137294 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.176360 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.217470 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.230954 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-log-socket\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231221 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-bin\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231052 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-log-socket\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231352 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-bin\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231330 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-node-log\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231494 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-node-log\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231501 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231575 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231600 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-ovn\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231739 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-ovn-kubernetes\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231742 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-ovn\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231769 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-script-lib\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231800 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-ovn-kubernetes\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231807 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-systemd\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231826 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231845 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-config\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231886 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-slash\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-slash\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231936 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-slash\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.231981 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-kubelet\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232121 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-systemd\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232119 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxqgl\" (UniqueName: \"kubernetes.io/projected/f10d9600-fac2-43e9-ad75-91b3c1f5b749-kube-api-access-qxqgl\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232173 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-systemd-units\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232192 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-netd\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232229 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovn-node-metrics-cert\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232260 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-var-lib-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232275 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-netns\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232293 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-etc-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232313 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-env-overrides\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232325 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-kubelet\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232595 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-var-lib-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232625 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232656 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-netns\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232655 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-script-lib\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232679 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-etc-openvswitch\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232742 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-netd\") pod 
\"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.232781 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-systemd-units\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.233158 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-env-overrides\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.233159 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-config\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.238854 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovn-node-metrics-cert\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.267850 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.285022 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxqgl\" (UniqueName: \"kubernetes.io/projected/f10d9600-fac2-43e9-ad75-91b3c1f5b749-kube-api-access-qxqgl\") pod \"ovnkube-node-hlqt9\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.316102 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.365165 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.395195 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.434346 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.434639 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:26:44.434584855 +0000 UTC m=+25.225302043 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.434837 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435032 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435152 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:44.435135479 +0000 UTC m=+25.225852627 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435199 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435293 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435363 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435445 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:44.435436876 +0000 UTC m=+25.226154024 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.435010 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.435615 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435811 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.435893 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:44.435876707 +0000 UTC m=+25.226593845 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.438151 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.482464 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.523630 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: 
I1126 15:26:42.537444 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.537902 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.538038 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.538125 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.538282 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:44.538258558 +0000 UTC m=+25.328975716 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.557111 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.568429 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.609839 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.637454 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":
{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.680057 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.728148 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.763457 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.802697 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.839157 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.883397 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.890756 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.890875 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.891173 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.891172 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.891273 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:42 crc kubenswrapper[5010]: E1126 15:26:42.891338 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.920821 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"las
tState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f67
13d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.955955 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:42 crc kubenswrapper[5010]: I1126 15:26:42.994234 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.038865 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.067484 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="56f2d574-eefa-4be0-bf3f-aff08053f4e8" containerID="f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b" exitCode=0 Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.067530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerDied","Data":"f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b"} Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.069204 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"4552169679da420cd350889ea2b3776c357f92990caf200c15d5e729ed5c00f5"} Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.092986 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.121051 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.160654 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc 
kubenswrapper[5010]: I1126 15:26:43.199482 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.239352 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.287430 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\
\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cr
i-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.319257 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.361241 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.398838 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.413137 5010 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-image-registry/node-ca-ckdwd"] Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.414025 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.427860 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.447608 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.466863 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.486662 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.521807 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.547949 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lmwfp\" (UniqueName: \"kubernetes.io/projected/a4ab940b-709f-4f03-ac81-9d6d57364f48-kube-api-access-lmwfp\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " 
pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.547989 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a4ab940b-709f-4f03-ac81-9d6d57364f48-serviceca\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.548007 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a4ab940b-709f-4f03-ac81-9d6d57364f48-host\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.557337 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.595453 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.634761 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.648669 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a4ab940b-709f-4f03-ac81-9d6d57364f48-serviceca\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.648747 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a4ab940b-709f-4f03-ac81-9d6d57364f48-host\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.648824 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lmwfp\" (UniqueName: \"kubernetes.io/projected/a4ab940b-709f-4f03-ac81-9d6d57364f48-kube-api-access-lmwfp\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " 
pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.649021 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/a4ab940b-709f-4f03-ac81-9d6d57364f48-host\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.650390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/a4ab940b-709f-4f03-ac81-9d6d57364f48-serviceca\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.689209 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lmwfp\" (UniqueName: \"kubernetes.io/projected/a4ab940b-709f-4f03-ac81-9d6d57364f48-kube-api-access-lmwfp\") pod \"node-ca-ckdwd\" (UID: \"a4ab940b-709f-4f03-ac81-9d6d57364f48\") " pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.697098 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling 
webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.728088 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-ckdwd" Nov 26 15:26:43 crc kubenswrapper[5010]: W1126 15:26:43.745931 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4ab940b_709f_4f03_ac81_9d6d57364f48.slice/crio-e958f61108caaa243b7099205652a8e9163a4de52bb0cbaa859df655cbebe170 WatchSource:0}: Error finding container e958f61108caaa243b7099205652a8e9163a4de52bb0cbaa859df655cbebe170: Status 404 returned error can't find the container with id e958f61108caaa243b7099205652a8e9163a4de52bb0cbaa859df655cbebe170 Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.746348 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.780556 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.829347 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc 
kubenswrapper[5010]: I1126 15:26:43.861514 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.900673 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.937912 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:43 crc kubenswrapper[5010]: I1126 15:26:43.978444 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:43Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.024127 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.027581 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef
0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mount
Path\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.032381 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.036443 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.074937 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-ckdwd" event={"ID":"a4ab940b-709f-4f03-ac81-9d6d57364f48","Type":"ContainerStarted","Data":"ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334"} Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.075034 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-ckdwd" event={"ID":"a4ab940b-709f-4f03-ac81-9d6d57364f48","Type":"ContainerStarted","Data":"e958f61108caaa243b7099205652a8e9163a4de52bb0cbaa859df655cbebe170"} Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.079235 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba"} Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.082712 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f2d574-eefa-4be0-bf3f-aff08053f4e8" containerID="21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f" exitCode=0 Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.082914 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerDied","Data":"21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f"} Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.085949 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" exitCode=0 Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.086045 5010 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.099691 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"re
ady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\
\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.115331 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.157099 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.193562 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.238998 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.279861 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.317175 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.356950 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.396843 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.439693 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.457998 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458195 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:26:48.458161376 +0000 UTC m=+29.248878534 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.458272 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.458368 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.458412 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458449 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458525 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:48.458504905 +0000 UTC m=+29.249222063 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458573 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458591 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458605 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458653 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:48.458641618 +0000 UTC m=+29.249358776 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458695 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.458747 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:48.458738731 +0000 UTC m=+29.249455889 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.479981 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/
serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.522418 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.556966 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.558872 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " 
pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.559083 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.559105 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.559122 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.559176 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:48.559159923 +0000 UTC m=+29.349877081 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.599558 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.643403 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.690550 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\
",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"
state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.722084 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.760200 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.800262 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.882093 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:44Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.891082 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.891157 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.891225 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:44 crc kubenswrapper[5010]: I1126 15:26:44.891260 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.891290 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:44 crc kubenswrapper[5010]: E1126 15:26:44.891502 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.093022 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f2d574-eefa-4be0-bf3f-aff08053f4e8" containerID="b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc" exitCode=0 Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.093125 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerDied","Data":"b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.097611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.097688 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.097747 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.097773 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.097789 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.097840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.118315 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serv
iceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.134603 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.1
68.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.151076 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.167798 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.185611 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.203118 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.226552 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.244466 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.265682 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.290297 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.314575 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b
4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.332043 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.364384 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.396786 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:45 crc kubenswrapper[5010]: I1126 15:26:45.439549 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:45Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.104545 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f2d574-eefa-4be0-bf3f-aff08053f4e8" containerID="45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd" exitCode=0 Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.104591 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerDied","Data":"45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.120692 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.151603 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.168792 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.186418 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.202627 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.216017 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.234955 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},
{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"re
startCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.249125 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.263292 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.275868 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.277352 5010 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.281352 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.281401 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.281417 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.281770 5010 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.288155 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.291082 5010 kubelet_node_status.go:115] "Node 
was previously registered" node="crc" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.291453 5010 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.293065 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.293109 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.293121 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.293143 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.293157 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.310279 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z 
is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.310429 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.316317 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.316362 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.316377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.316403 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.316421 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.332078 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.334059 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.336536 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.336581 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.336593 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.336614 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.336629 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.347370 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.350816 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"a
cbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.355657 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.355732 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.355750 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.355771 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.355786 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.362024 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.368936 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.372911 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.372952 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.372965 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.372988 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.373002 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.385419 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:46Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.385571 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.387296 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.387327 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.387343 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.387359 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.387371 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.490035 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.490078 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.490092 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.490108 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.490118 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.592979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.593023 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.593033 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.593049 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.593061 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.695432 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.695507 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.695528 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.695561 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.695587 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.798371 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.798460 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.798481 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.798515 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.798538 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.891214 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.891301 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.891337 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.891391 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.891639 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:46 crc kubenswrapper[5010]: E1126 15:26:46.891914 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.901853 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.901898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.901914 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.901940 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:46 crc kubenswrapper[5010]: I1126 15:26:46.901956 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:46Z","lastTransitionTime":"2025-11-26T15:26:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.005070 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.005111 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.005120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.005135 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.005145 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.107985 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.108030 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.108042 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.108061 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.108072 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.115450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.119012 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f2d574-eefa-4be0-bf3f-aff08053f4e8" containerID="227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a" exitCode=0 Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.119066 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerDied","Data":"227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.139756 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be 
located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.153292 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"ho
st-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.175889 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.196960 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.210363 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.210411 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.210423 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.210442 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.210457 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.215963 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.244847 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.270620 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z 
is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.298410 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"
/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\"
:\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.314841 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.314907 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.314926 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.314950 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.314968 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.316826 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.339486 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.351774 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.374638 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.389698 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.401439 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.417556 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.417599 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.417609 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.417625 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.417644 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.422532 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15
:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:47Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.520180 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.520227 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.520244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.520270 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.520287 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.623621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.623699 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.623938 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.623988 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.624018 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.727385 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.727449 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.727467 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.727493 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.727510 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.830877 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.831199 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.831210 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.831223 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.831232 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.935048 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.935102 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.935114 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.935132 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:47 crc kubenswrapper[5010]: I1126 15:26:47.935144 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:47Z","lastTransitionTime":"2025-11-26T15:26:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.039198 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.039287 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.039305 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.039337 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.039358 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.127822 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f2d574-eefa-4be0-bf3f-aff08053f4e8" containerID="2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd" exitCode=0 Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.127868 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerDied","Data":"2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.142622 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.142679 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.142696 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.142751 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.142771 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.157141 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.175946 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.191872 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.214327 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.237128 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.246495 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.246550 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.246563 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.246581 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.246595 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.256932 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.273261 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.293629 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.313037 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.328052 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.342779 5010 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.349776 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.349812 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.349824 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.349840 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.349851 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.361406 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.372415 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.402343 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.424160 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b
4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.452089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.452135 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.452144 5010 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.452163 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.452174 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.502920 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.503012 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.503068 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.503097 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503169 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:26:56.503142626 +0000 UTC m=+37.293859774 (durationBeforeRetry 8s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503252 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503274 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503287 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503333 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:56.50331933 +0000 UTC m=+37.294036488 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503342 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503383 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:56.503374882 +0000 UTC m=+37.294092030 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503479 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.503684 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:56.503635198 +0000 UTC m=+37.294352546 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.554305 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.554364 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.554377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.554400 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.554415 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.604411 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.604695 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.604772 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.604797 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.604888 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:56.604862321 +0000 UTC m=+37.395579479 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.657072 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.657178 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.657208 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.657251 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.657332 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.761079 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.761140 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.761154 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.761176 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.761190 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.864915 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.865007 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.865026 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.865055 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.865075 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.891461 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.891524 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.891470 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.891668 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.891843 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:48 crc kubenswrapper[5010]: E1126 15:26:48.891944 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.969530 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.969618 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.969642 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.969677 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:48 crc kubenswrapper[5010]: I1126 15:26:48.969706 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:48Z","lastTransitionTime":"2025-11-26T15:26:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.073426 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.073492 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.073511 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.073538 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.073558 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.138652 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" event={"ID":"56f2d574-eefa-4be0-bf3f-aff08053f4e8","Type":"ContainerStarted","Data":"72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.160424 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.177799 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.177855 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.177868 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.177892 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.177907 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.179985 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.205847 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.225345 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.247038 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.269549 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.283513 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.283581 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.283609 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.283642 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.283670 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.289813 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.322537 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"n
ame\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIP
s\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.358124 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.380467 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.387630 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.387673 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.387692 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.387834 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.387864 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.400609 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.422379 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.447916 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.472532 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.491859 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.491910 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.491937 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.491974 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.492001 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.495092 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.595300 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.595389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.595408 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.595441 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.595466 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.698317 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.698381 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.698403 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.698430 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.698456 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.801540 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.802109 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.802131 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.802161 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.802182 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.905681 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.905778 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.905800 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.905827 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.905846 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:49Z","lastTransitionTime":"2025-11-26T15:26:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.909096 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.930648 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.951694 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:49 crc kubenswrapper[5010]: I1126 15:26:49.989189 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.020543 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.020587 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.020601 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.020624 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.020638 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.046232 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.084966 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.105625 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.121950 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.123190 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.123224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.123234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.123250 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.123260 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.133209 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15
:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.146295 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubern
etes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.147822 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.147881 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.148028 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.161184 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.172002 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"
podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.174964 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.175440 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.183649 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.196132 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.210619 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.222390 5010 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.225754 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.225799 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.225813 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.225832 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.225846 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.235375 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.256960 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.281486 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount
\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.308850 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.326164 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.330028 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.330087 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.330103 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.330123 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.330137 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.344415 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.359663 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.379218 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.399189 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.416752 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.433117 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.433406 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.433454 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.433469 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.433493 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.433508 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.449981 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.471189 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed8145
1ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTim
e\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.485532 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.536211 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.536272 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 
15:26:50.536290 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.536315 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.536334 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.639622 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.639683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.639738 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.639769 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.639791 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.743212 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.743288 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.743314 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.743340 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.743358 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.846310 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.846759 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.846911 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.847037 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.847160 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.891019 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.891187 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.891220 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:50 crc kubenswrapper[5010]: E1126 15:26:50.891979 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:50 crc kubenswrapper[5010]: E1126 15:26:50.892077 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:50 crc kubenswrapper[5010]: E1126 15:26:50.891952 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.950850 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.950917 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.950935 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.950959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:50 crc kubenswrapper[5010]: I1126 15:26:50.950977 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:50Z","lastTransitionTime":"2025-11-26T15:26:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.053918 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.053985 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.054005 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.054032 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.054053 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.150936 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.156592 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.156642 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.156659 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.156683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.156701 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.260784 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.260860 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.260880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.260910 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.260931 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.364620 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.364698 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.364742 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.364769 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.364787 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.468426 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.468466 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.468481 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.468505 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.468523 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.570777 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.570808 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.570819 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.570834 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.570846 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.673409 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.673453 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.673471 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.673494 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.673511 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.776178 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.776226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.776243 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.776265 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.776282 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.879920 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.879966 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.879983 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.880010 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.880027 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.982858 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.982903 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.982920 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.982942 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:51 crc kubenswrapper[5010]: I1126 15:26:51.982957 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:51Z","lastTransitionTime":"2025-11-26T15:26:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.086377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.086422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.086438 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.086456 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.086472 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.154607 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.189767 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.189837 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.189855 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.189922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.189942 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.294127 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.294233 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.294253 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.294282 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.294304 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.398559 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.398635 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.398658 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.398690 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.398734 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.502616 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.502680 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.502699 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.502790 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.502818 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.606007 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.606094 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.606123 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.606161 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.606187 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.710536 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.710616 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.710640 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.710678 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.710699 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.814082 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.814174 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.814200 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.814230 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.814256 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.891880 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.892011 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.892086 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:52 crc kubenswrapper[5010]: E1126 15:26:52.892276 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:52 crc kubenswrapper[5010]: E1126 15:26:52.892502 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:52 crc kubenswrapper[5010]: E1126 15:26:52.892806 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.918771 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.918854 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.918885 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.918923 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:52 crc kubenswrapper[5010]: I1126 15:26:52.918948 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:52Z","lastTransitionTime":"2025-11-26T15:26:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.022976 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.023059 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.023078 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.023109 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.023130 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.131548 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.131609 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.131630 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.131662 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.131685 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.170529 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/0.log" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.176689 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd" exitCode=1 Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.176806 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.178237 5010 scope.go:117] "RemoveContainer" containerID="0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.203221 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.230167 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.235211 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.235251 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.235262 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.235280 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.235292 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.251156 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.274286 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.295304 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.319530 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.339583 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.339627 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.339647 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.339685 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.339740 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.344270 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.369700 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.393989 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.428374 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5
e181cc86b0397f0e4970c1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.444064 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.444119 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.444139 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.444166 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.444186 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.468799 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.497594 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.519236 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.535166 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.546685 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.546738 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.546747 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.546762 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.546775 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.548960 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15
:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.586356 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.609232 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.630700 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.649961 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.650014 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.650029 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.650053 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.650067 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.650120 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.690957 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.727525 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.747458 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.752885 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.752959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.752983 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.753013 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.753035 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.764925 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.787951 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.809354 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.824458 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.841788 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.855265 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.855484 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.855571 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.855699 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.855810 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.856612 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.870438 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.886583 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.900232 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:53Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.958773 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.958828 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.958846 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.958877 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:53 crc kubenswrapper[5010]: I1126 15:26:53.958897 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:53Z","lastTransitionTime":"2025-11-26T15:26:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.062879 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.062933 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.062949 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.062975 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.062993 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.166376 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.166464 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.166488 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.166519 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.166538 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.184619 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/0.log" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.188778 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.189070 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.215917 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\
\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\
"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.231349 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.249738 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.264927 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"
,\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.270380 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.270476 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.270498 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.270540 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.270563 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.298371 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.319280 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.340821 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.357768 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.373355 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.374011 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.374093 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.374113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.374144 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.374165 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.390195 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.411259 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.434475 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6de
e08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.449204 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43
Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.468746 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.477145 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.477192 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.477206 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.477226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.477240 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.485543 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:54Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.580950 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.581026 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.581044 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.581070 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.581088 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.690576 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.690660 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.690678 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.690723 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.690744 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.794800 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.794873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.794891 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.794919 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.794938 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.890586 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.890733 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:54 crc kubenswrapper[5010]: E1126 15:26:54.890771 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:54 crc kubenswrapper[5010]: E1126 15:26:54.890935 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.891152 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:54 crc kubenswrapper[5010]: E1126 15:26:54.891434 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.898198 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.898234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.898244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.898260 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:54 crc kubenswrapper[5010]: I1126 15:26:54.898271 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:54Z","lastTransitionTime":"2025-11-26T15:26:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.001262 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.001336 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.001366 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.001401 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.001425 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.015534 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc"] Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.016223 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.026848 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.026878 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.040255 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.059635 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.079863 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.080096 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.080670 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.080751 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.080795 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64zw4\" (UniqueName: \"kubernetes.io/projected/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-kube-api-access-64zw4\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.105223 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.105306 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.105327 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.105362 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.105385 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.112379 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\
":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access
-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.153216 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.176798 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.182041 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc 
kubenswrapper[5010]: I1126 15:26:55.182170 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.182218 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.182257 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64zw4\" (UniqueName: \"kubernetes.io/projected/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-kube-api-access-64zw4\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.183330 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.183758 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.192783 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.197053 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/1.log" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.198381 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/0.log" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.200637 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.203883 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590" exitCode=1 Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.203945 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.204039 5010 scope.go:117] "RemoveContainer" containerID="0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.206980 5010 scope.go:117] "RemoveContainer" containerID="6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590" Nov 26 15:26:55 crc kubenswrapper[5010]: E1126 15:26:55.207425 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.208827 5010 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.208891 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.208911 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.208939 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.208961 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.213554 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64zw4\" (UniqueName: \"kubernetes.io/projected/cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2-kube-api-access-64zw4\") pod \"ovnkube-control-plane-749d76644c-qfvdc\" (UID: \"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.223995 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.245586 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.270912 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.294925 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.309453 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.311952 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.312009 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.312029 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.312055 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.312074 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.327359 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.339556 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.347849 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: W1126 15:26:55.359083 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb2cca68_6cd3_4ee3_9a3b_b1d22857d2b2.slice/crio-981eff429338bb88860aa1e5f198e01f4c0f5e77e31aa165d89bb219dd65dee0 WatchSource:0}: Error finding container 981eff429338bb88860aa1e5f198e01f4c0f5e77e31aa165d89bb219dd65dee0: Status 404 returned error can't find the container with id 981eff429338bb88860aa1e5f198e01f4c0f5e77e31aa165d89bb219dd65dee0 Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.368904 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.391179 5010 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.410197 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.414997 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.419300 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.419345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.419369 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.419397 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.436654 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.455990 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.474947 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.497997 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.518855 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.523592 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.523632 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.523645 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.523662 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.523674 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.532629 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.550590 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed8145
1ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTim
e\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.563551 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.578056 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.592702 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.606427 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.626562 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.626606 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.626616 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.626634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.626648 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.636047 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.662369 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\
",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-
dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.680282 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.701259 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.729254 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.729315 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.729332 5010 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.729354 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.729370 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.790341 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-df2ll"] Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.791333 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:55 crc kubenswrapper[5010]: E1126 15:26:55.791450 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.814299 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.832802 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.832859 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.832876 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.832905 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.832925 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.837954 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.866581 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cn
ibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.888802 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.889163 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kh4k\" (UniqueName: \"kubernetes.io/projected/fd9f5a65-e633-439f-8e8d-b760d20a3223-kube-api-access-2kh4k\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.889245 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.912755 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.937048 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.937097 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.937111 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.937128 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.937143 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:55Z","lastTransitionTime":"2025-11-26T15:26:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.938929 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.974917 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:55Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.990632 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kh4k\" (UniqueName: \"kubernetes.io/projected/fd9f5a65-e633-439f-8e8d-b760d20a3223-kube-api-access-2kh4k\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:55 crc kubenswrapper[5010]: I1126 15:26:55.990745 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:55 crc kubenswrapper[5010]: E1126 15:26:55.990969 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:55 crc kubenswrapper[5010]: E1126 15:26:55.991098 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:56.491070125 +0000 UTC m=+37.281787273 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.007991 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f5840
8f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\
\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.022892 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kh4k\" (UniqueName: \"kubernetes.io/projected/fd9f5a65-e633-439f-8e8d-b760d20a3223-kube-api-access-2kh4k\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.029677 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.039622 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.039657 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.039669 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.039695 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.039719 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.049082 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.061823 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.084653 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert 
Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kub
e-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.096827 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.108382 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.118779 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.126261 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.138542 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.142344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc 
kubenswrapper[5010]: I1126 15:26:56.142371 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.142380 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.142394 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.142406 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.210158 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" event={"ID":"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2","Type":"ContainerStarted","Data":"751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.210232 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" event={"ID":"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2","Type":"ContainerStarted","Data":"73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.210252 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" event={"ID":"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2","Type":"ContainerStarted","Data":"981eff429338bb88860aa1e5f198e01f4c0f5e77e31aa165d89bb219dd65dee0"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.212597 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/1.log" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.225803 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.238581 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.245461 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.245520 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.245539 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.245567 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.245585 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.270691 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.290996 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.314267 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.332807 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.348985 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.349052 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.349072 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.349103 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.349126 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.354356 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://0e57483c135c2def338abfc271816f0c258ad9b5e181cc86b0397f0e4970c1fd\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:52Z\\\",\\\"message\\\":\\\" reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533642 6373 reflector.go:311] Stopping reflector *v1.NetworkAttachmentDefinition (0s) from github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/informers/externalversions/factory.go:117\\\\nI1126 15:26:52.533693 6373 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 15:26:52.533971 6373 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:26:52.533992 6373 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 15:26:52.534018 6373 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 15:26:52.534092 6373 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 15:26:52.534183 6373 factory.go:656] Stopping watch factory\\\\nI1126 15:26:52.534205 6373 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 15:26:52.534219 6373 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:26:52.534232 6373 handler.go:208] Removed *v1.Node 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:49Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.372788 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.395114 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.414671 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.432199 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.446581 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.451271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.451344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.451368 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.451397 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.451420 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.465349 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.484034 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.496580 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.496769 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.496839 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:57.496821717 +0000 UTC m=+38.287538865 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.500835 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"n
ame\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.524463 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T1
5:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2
de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.543100 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.554670 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.554782 5010 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.554812 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.554844 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.554863 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.597867 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.598097 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598118 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:27:12.59808577 +0000 UTC m=+53.388802948 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.598159 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.598206 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598268 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598410 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:12.598371807 +0000 UTC m=+53.389088995 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598426 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598534 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:12.598505231 +0000 UTC m=+53.389222419 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598531 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598597 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598622 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.598776 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:12.598738736 +0000 UTC m=+53.389455954 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.629344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.629416 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.629434 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.629463 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.629486 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.656657 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.664180 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.664399 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.664531 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.664880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.665022 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.686884 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.692578 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.692794 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.692922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.693048 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.693165 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.699551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.699790 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.699812 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.699827 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.699876 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:12.699861216 +0000 UTC m=+53.490578384 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.713185 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.720449 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.720828 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.721090 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.721349 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.721582 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.747620 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.754320 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.754450 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.754470 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.754500 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.754521 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.777079 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:56Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.777326 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.779737 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.779825 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.779849 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.779879 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.779901 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.883145 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.883232 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.883251 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.883276 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.883292 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.891681 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.891791 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.891791 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.892181 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.892312 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:56 crc kubenswrapper[5010]: E1126 15:26:56.892605 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.986174 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.986225 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.986242 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.986261 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:56 crc kubenswrapper[5010]: I1126 15:26:56.986277 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:56Z","lastTransitionTime":"2025-11-26T15:26:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.088797 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.088851 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.088864 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.088884 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.088899 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.092336 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.093176 5010 scope.go:117] "RemoveContainer" containerID="6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590" Nov 26 15:26:57 crc kubenswrapper[5010]: E1126 15:26:57.093360 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.115050 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.135531 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.158863 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6de
e08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.174899 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43
Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.193163 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.193216 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.193238 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.193268 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.193292 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.193402 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.214328 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.235826 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.271882 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.296043 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.296095 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.296108 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.296126 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.296137 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.296595 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.322165 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.343381 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.378325 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 
10s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0
f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.400244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.400292 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.400305 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.400325 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.400339 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.401991 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.425028 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.446241 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.461497 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.479785 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:57Z is after 2025-08-24T17:21:41Z" Nov 26 
15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.503379 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.503441 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.503454 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.503478 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.503496 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.506960 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:57 crc kubenswrapper[5010]: E1126 15:26:57.507157 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:57 crc kubenswrapper[5010]: E1126 15:26:57.507250 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:26:59.507223044 +0000 UTC m=+40.297940202 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.607067 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.607140 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.607164 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.607197 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.607217 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.711447 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.711522 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.711545 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.711576 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.711594 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.815217 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.815285 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.815304 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.815347 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.815370 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.891521 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:57 crc kubenswrapper[5010]: E1126 15:26:57.891771 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.918275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.918342 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.918361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.918385 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:57 crc kubenswrapper[5010]: I1126 15:26:57.918408 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:57Z","lastTransitionTime":"2025-11-26T15:26:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.021992 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.022766 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.022833 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.022872 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.022898 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.126578 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.126653 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.126682 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.126763 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.126787 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.229052 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.229148 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.229183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.229222 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.229247 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.333174 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.333250 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.333271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.333301 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.333327 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.436858 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.436937 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.436956 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.436985 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.437009 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.540979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.541052 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.541075 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.541111 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.541136 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.644388 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.644479 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.644551 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.644579 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.644635 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.748956 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.749018 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.749034 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.749057 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.749075 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.852438 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.852512 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.852532 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.852561 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.852585 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.891076 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.891117 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.891117 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:26:58 crc kubenswrapper[5010]: E1126 15:26:58.891302 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:26:58 crc kubenswrapper[5010]: E1126 15:26:58.891517 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:26:58 crc kubenswrapper[5010]: E1126 15:26:58.891743 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.955602 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.955679 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.955696 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.955748 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:58 crc kubenswrapper[5010]: I1126 15:26:58.955762 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:58Z","lastTransitionTime":"2025-11-26T15:26:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.059123 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.059219 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.059244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.059271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.059313 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.163340 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.163413 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.163434 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.163464 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.163486 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.267062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.267150 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.267186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.267220 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.267241 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.371074 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.371145 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.371184 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.371217 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.371236 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.475100 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.475167 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.475189 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.475219 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.475242 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.531157 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:59 crc kubenswrapper[5010]: E1126 15:26:59.531467 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:59 crc kubenswrapper[5010]: E1126 15:26:59.531606 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:03.531567754 +0000 UTC m=+44.322284942 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.578630 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.578702 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.578862 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.578898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.578925 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.682116 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.682190 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.682216 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.682250 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.682275 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.785907 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.785998 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.786023 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.786055 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.786083 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.889176 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.889253 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.889270 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.889300 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.889321 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.890999 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:26:59 crc kubenswrapper[5010]: E1126 15:26:59.891220 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.913763 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:59Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.946681 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/va
r/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5f
d5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network 
controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:59Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.966475 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:59Z is after 2025-08-24T17:21:41Z" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.992309 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.992386 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.992408 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.992440 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.992463 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:26:59Z","lastTransitionTime":"2025-11-26T15:26:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:26:59 crc kubenswrapper[5010]: I1126 15:26:59.999085 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:26:59Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.019412 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.039841 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.059623 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.081566 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.095082 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.095135 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.095153 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.095183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.095202 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.107789 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.139330 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.165248 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.184700 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.200263 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.200391 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.200415 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.200442 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.200461 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.206090 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.231286 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed8145
1ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTim
e\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.249011 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.271002 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.292758 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:00Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.303142 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.303209 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.303228 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.303261 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.303281 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.406849 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.406938 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.406959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.406997 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.407019 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.510393 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.510475 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.510499 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.510526 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.510600 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.614188 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.614250 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.614269 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.614295 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.614313 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.717602 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.717668 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.717685 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.717741 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.717761 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.821404 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.821475 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.821497 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.821524 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.821543 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.890691 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.890760 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.890805 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:00 crc kubenswrapper[5010]: E1126 15:27:00.890900 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:00 crc kubenswrapper[5010]: E1126 15:27:00.891065 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:00 crc kubenswrapper[5010]: E1126 15:27:00.891270 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.925010 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.925049 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.925061 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.925078 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:00 crc kubenswrapper[5010]: I1126 15:27:00.925088 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:00Z","lastTransitionTime":"2025-11-26T15:27:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.028377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.028420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.028429 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.028463 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.028473 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.132383 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.132468 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.132493 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.132527 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.132553 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.236155 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.236208 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.236220 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.236241 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.236257 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.339754 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.339837 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.339862 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.339897 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.339930 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.443270 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.443345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.443367 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.443391 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.443412 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.546777 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.546856 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.546874 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.546907 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.546928 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.650469 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.650575 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.650601 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.650639 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.650666 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.753917 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.753989 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.754009 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.754040 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.754062 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.857983 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.858051 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.858069 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.858097 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.858117 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.891700 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:01 crc kubenswrapper[5010]: E1126 15:27:01.891983 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.961428 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.961492 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.961510 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.961537 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:01 crc kubenswrapper[5010]: I1126 15:27:01.961560 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:01Z","lastTransitionTime":"2025-11-26T15:27:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.065002 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.065075 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.065096 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.065129 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.065150 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.174301 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.174401 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.174429 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.174467 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.174739 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.279006 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.279090 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.279112 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.279150 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.279172 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.382493 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.382557 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.382577 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.382606 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.382628 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.485872 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.485940 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.485959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.485985 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.486006 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.589120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.589184 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.589208 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.589234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.589252 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.691866 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.691946 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.691966 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.692001 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.692021 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.794785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.794864 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.794888 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.794918 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.794941 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.890879 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.890888 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.890888 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:02 crc kubenswrapper[5010]: E1126 15:27:02.891290 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:02 crc kubenswrapper[5010]: E1126 15:27:02.891082 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:02 crc kubenswrapper[5010]: E1126 15:27:02.891394 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.898203 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.898258 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.898275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.898299 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:02 crc kubenswrapper[5010]: I1126 15:27:02.898321 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:02Z","lastTransitionTime":"2025-11-26T15:27:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.002150 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.002234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.002256 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.002282 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.002301 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.105663 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.105728 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.105741 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.105758 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.105772 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.208112 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.208182 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.208200 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.208232 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.208251 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.310977 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.311045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.311062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.311087 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.311105 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.413375 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.413433 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.413464 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.413488 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.413507 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.516899 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.516959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.516977 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.517003 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.517021 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.579117 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:03 crc kubenswrapper[5010]: E1126 15:27:03.579331 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:03 crc kubenswrapper[5010]: E1126 15:27:03.579454 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:11.579422736 +0000 UTC m=+52.370139914 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.620171 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.620211 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.620224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.620245 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.620259 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.723064 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.723147 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.723175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.723209 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.723233 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.827148 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.827206 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.827224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.827255 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.827274 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.891574 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:03 crc kubenswrapper[5010]: E1126 15:27:03.891855 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.930226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.930271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.930306 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.930326 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:03 crc kubenswrapper[5010]: I1126 15:27:03.930338 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:03Z","lastTransitionTime":"2025-11-26T15:27:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.034170 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.034274 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.034295 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.034324 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.034346 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.138074 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.138134 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.138151 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.138177 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.138194 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.240627 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.240754 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.240778 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.240811 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.240837 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.344065 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.344166 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.344188 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.344224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.344279 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.454410 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.454490 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.454510 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.454540 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.454561 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.557996 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.558063 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.558114 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.558143 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.558158 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.662062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.662133 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.662153 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.662185 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.662207 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.765979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.766053 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.766071 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.766105 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.766126 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.868880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.868953 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.868976 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.869008 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.869034 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.891439 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.891480 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.891545 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:04 crc kubenswrapper[5010]: E1126 15:27:04.891649 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:04 crc kubenswrapper[5010]: E1126 15:27:04.891852 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:04 crc kubenswrapper[5010]: E1126 15:27:04.892093 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.973008 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.973146 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.973222 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.973262 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:04 crc kubenswrapper[5010]: I1126 15:27:04.973331 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:04Z","lastTransitionTime":"2025-11-26T15:27:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.076989 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.077074 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.077103 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.077141 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.077166 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.180676 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.180760 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.180778 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.180809 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.180830 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.284168 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.284232 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.284244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.284267 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.284283 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.389481 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.389577 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.389603 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.389638 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.389664 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.493321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.493415 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.493439 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.493477 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.493508 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.596979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.597065 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.597080 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.597102 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.597138 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.700117 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.700190 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.700205 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.700562 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.700611 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.804249 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.804308 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.804321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.804345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.804361 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.891637 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:05 crc kubenswrapper[5010]: E1126 15:27:05.891918 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.907329 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.907375 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.907387 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.907403 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:05 crc kubenswrapper[5010]: I1126 15:27:05.907415 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:05Z","lastTransitionTime":"2025-11-26T15:27:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.011282 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.011351 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.011368 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.011396 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.011416 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.114569 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.114642 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.114664 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.114692 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.114739 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.217444 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.217497 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.217515 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.217542 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.217559 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.321638 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.321694 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.321742 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.321766 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.321785 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.425011 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.425067 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.425087 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.425113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.425131 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.527892 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.527965 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.527975 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.527991 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.528001 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.631054 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.631120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.631138 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.631154 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.631165 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.734322 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.734375 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.734385 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.734403 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.734415 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.838558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.838909 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.838948 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.838981 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.839018 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.891548 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.891583 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:06 crc kubenswrapper[5010]: E1126 15:27:06.891698 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.891746 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:06 crc kubenswrapper[5010]: E1126 15:27:06.891973 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:06 crc kubenswrapper[5010]: E1126 15:27:06.892166 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.942385 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.942474 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.942493 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.942517 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.942534 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.999569 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.999662 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:06 crc kubenswrapper[5010]: I1126 15:27:06.999688 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:06.999763 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:06.999791 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:06Z","lastTransitionTime":"2025-11-26T15:27:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.017143 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:06Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:07Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.021688 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.021783 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.021803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.021829 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.021848 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.048643 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:07Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.054125 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.054327 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.054447 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.054609 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.054793 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.071748 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:07Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.077242 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.077320 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.077345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.077372 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.077392 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.097777 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:07Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.102864 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.102923 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.102948 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.102979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.103003 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.125034 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:07Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:07Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.125261 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.127149 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.127210 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.127227 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.127245 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.127255 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.230053 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.230110 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.230118 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.230131 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.230162 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.332186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.332252 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.332277 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.332310 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.332335 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.435333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.435395 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.435417 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.435441 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.435459 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.539688 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.539783 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.539803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.539844 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.539862 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.643044 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.643093 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.643111 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.643132 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.643150 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.746803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.746860 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.746880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.746905 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.746921 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.849823 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.849907 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.849924 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.849952 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.849972 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.891029 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:07 crc kubenswrapper[5010]: E1126 15:27:07.891308 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.953199 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.953276 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.953296 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.953320 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:07 crc kubenswrapper[5010]: I1126 15:27:07.953340 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:07Z","lastTransitionTime":"2025-11-26T15:27:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.056789 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.056894 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.056916 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.056976 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.057008 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.160582 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.160642 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.160661 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.160685 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.160729 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.263399 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.263491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.263515 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.263547 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.263571 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.366690 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.366816 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.366837 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.366865 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.366885 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.469844 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.469928 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.469953 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.469986 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.470010 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.572674 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.572788 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.572813 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.572848 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.572871 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.676866 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.676938 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.676957 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.676984 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.677001 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.780310 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.780373 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.780392 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.780420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.780437 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.883559 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.883627 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.883647 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.883672 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.883692 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.890951 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:08 crc kubenswrapper[5010]: E1126 15:27:08.891110 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.891553 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.891589 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:08 crc kubenswrapper[5010]: E1126 15:27:08.891789 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:08 crc kubenswrapper[5010]: E1126 15:27:08.892351 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.987639 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.987744 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.987773 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.987807 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:08 crc kubenswrapper[5010]: I1126 15:27:08.987831 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:08Z","lastTransitionTime":"2025-11-26T15:27:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.091589 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.091662 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.091681 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.091705 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.091785 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.194906 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.194984 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.195008 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.195039 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.195066 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.298525 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.298593 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.298615 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.298643 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.298667 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.402055 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.402120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.402141 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.402169 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.402191 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.505225 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.505283 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.505299 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.505323 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.505339 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.608746 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.608822 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.608840 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.608866 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.608886 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.711912 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.711971 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.711988 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.712010 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.712028 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.815206 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.815264 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.815280 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.815306 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.815323 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.890853 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:09 crc kubenswrapper[5010]: E1126 15:27:09.891102 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.892501 5010 scope.go:117] "RemoveContainer" containerID="6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.914306 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:09Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.918922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.918998 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.919022 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.919048 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.919067 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:09Z","lastTransitionTime":"2025-11-26T15:27:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.941026 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:09Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:09 crc kubenswrapper[5010]: I1126 15:27:09.983410 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:09Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.003298 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.021232 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.021533 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.021555 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.021563 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.021577 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.021588 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.037099 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.065997 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to 
start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.078685 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.101325 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.117072 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.124374 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.124410 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.124423 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.124447 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.124459 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.135118 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.147321 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.161814 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.175530 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.195310 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.220465 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.226422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.226479 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.226496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.226521 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.226537 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.242071 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.274342 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/1.log" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.277181 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.277610 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.290655 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mount
Path\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.312495 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.324533 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.328807 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.328848 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.328860 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.328882 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.328896 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.335611 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.348120 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.360752 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.387521 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.448934 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.449029 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.449068 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.449082 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.449098 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.449109 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.467289 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.490677 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to 
start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.
11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.500561 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.518637 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.529771 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d23399
4d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of 
insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.542494 5010 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76
945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.551261 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.551302 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.551315 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.551333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.551344 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.555430 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.565395 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.578108 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:10Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.653566 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.653621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.653633 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.653651 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.653663 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.756434 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.756507 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.756525 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.756550 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.756570 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.860417 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.860471 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.860489 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.860514 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.860536 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.891419 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.891490 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.891415 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:10 crc kubenswrapper[5010]: E1126 15:27:10.891656 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:10 crc kubenswrapper[5010]: E1126 15:27:10.891808 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:10 crc kubenswrapper[5010]: E1126 15:27:10.891890 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.963913 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.964005 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.964031 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.964063 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:10 crc kubenswrapper[5010]: I1126 15:27:10.964086 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:10Z","lastTransitionTime":"2025-11-26T15:27:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.067638 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.067757 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.067776 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.067803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.067820 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.170567 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.170646 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.170666 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.170695 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.170740 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.274058 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.274104 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.274115 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.274136 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.274149 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.284078 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/2.log" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.285265 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/1.log" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.289223 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3" exitCode=1 Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.289270 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.289352 5010 scope.go:117] "RemoveContainer" containerID="6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.290366 5010 scope.go:117] "RemoveContainer" containerID="26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3" Nov 26 15:27:11 crc kubenswrapper[5010]: E1126 15:27:11.290681 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.315425 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.337977 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.357355 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.373081 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.377285 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.377348 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.377366 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.377394 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.377447 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.390185 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.411752 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.433105 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.458622 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6de
e08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.477183 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43
Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.480489 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.480549 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.480570 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.480595 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.480615 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.499337 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.517596 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.534440 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.566839 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.583202 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.583267 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.583287 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.583312 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.583331 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.587156 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.607480 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.625942 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.657421 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6c2de16ce85dbbd10fcd3f3ec825f38c7c81cd66b6100f43b03bb82b51400590\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:26:54Z\\\",\\\"message\\\":\\\"w:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:960d98b2-dc64-4e93-a4b6-9b19847af71e}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:} {Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:26:54.319006 6508 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:11Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.676907 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:11 crc kubenswrapper[5010]: E1126 15:27:11.677169 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:11 crc kubenswrapper[5010]: E1126 15:27:11.677284 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:27.67725857 +0000 UTC m=+68.467975758 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.686234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.686312 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.686333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.686358 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.686376 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.789154 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.789224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.789241 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.789266 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.789284 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.890659 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:11 crc kubenswrapper[5010]: E1126 15:27:11.890922 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.892214 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.892313 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.892377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.892403 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.892422 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.995830 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.995924 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.995944 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.995969 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:11 crc kubenswrapper[5010]: I1126 15:27:11.995987 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:11Z","lastTransitionTime":"2025-11-26T15:27:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.099392 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.099844 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.100010 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.100160 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.100322 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.203832 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.204180 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.204357 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.204548 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.204945 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.299699 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/2.log" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.306471 5010 scope.go:117] "RemoveContainer" containerID="26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.306888 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.311804 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.311882 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.311904 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.311935 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.311959 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.333980 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.354810 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.395453 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da6221
4f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.415849 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.415917 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.415936 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.415962 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.415981 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.417414 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.447535 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.469046 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.486443 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.503934 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.518444 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.518502 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.518528 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.518556 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.518577 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.520929 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.542328 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.573937 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.598267 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.616346 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.623561 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.623604 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.623615 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.623634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.623646 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.640089 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.663283 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.686577 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6de
e08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.687978 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.688133 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688164 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:27:44.688139289 +0000 UTC m=+85.478856447 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.688207 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688247 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.688277 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688309 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:44.688290813 +0000 UTC m=+85.479008001 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688421 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688486 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:44.688470107 +0000 UTC m=+85.479187275 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688494 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688555 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688578 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.688687 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:44.688654072 +0000 UTC m=+85.479371270 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.697861 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:12Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.726136 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.726198 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.726221 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.726249 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.726273 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.789255 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.789513 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.789545 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.789566 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.789645 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:44.789622498 +0000 UTC m=+85.580339686 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.829377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.829451 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.829474 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.829503 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.829525 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.890854 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.890901 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.890877 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.891063 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.891171 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:12 crc kubenswrapper[5010]: E1126 15:27:12.891309 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.932534 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.932588 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.932600 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.932618 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:12 crc kubenswrapper[5010]: I1126 15:27:12.932633 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:12Z","lastTransitionTime":"2025-11-26T15:27:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.034973 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.035009 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.035017 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.035030 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.035039 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.138257 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.138322 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.138346 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.138378 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.138412 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.241450 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.241533 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.241559 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.241595 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.241619 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.344476 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.344546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.344570 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.344602 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.344627 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.448483 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.448548 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.448573 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.448603 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.448629 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.551759 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.552170 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.552319 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.552502 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.552651 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.656264 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.656308 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.656326 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.656351 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.656368 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.759810 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.759859 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.759878 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.759902 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.759922 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.862896 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.862951 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.862968 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.862991 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.863012 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.891181 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:13 crc kubenswrapper[5010]: E1126 15:27:13.891367 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.966803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.966873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.966895 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.966926 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:13 crc kubenswrapper[5010]: I1126 15:27:13.966949 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:13Z","lastTransitionTime":"2025-11-26T15:27:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.061084 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.070094 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.070137 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.070148 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.070164 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.070177 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.077077 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.080852 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.097299 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.115474 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.133979 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.152661 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.171559 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase
\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.173467 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.173519 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.173537 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.173559 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.173577 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.194073 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.216061 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.242035 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6de
e08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.264507 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc 
kubenswrapper[5010]: I1126 15:27:14.277170 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.277232 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.277249 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.277275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.277293 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.283182 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.305787 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/
webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.325701 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.360808 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da6221
4f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.380196 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.380252 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.380269 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.380293 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.380311 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.381211 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.419372 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.442262 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:14Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.484211 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.484345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.484371 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.484401 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.484423 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.587534 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.587940 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.588046 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.588185 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.588279 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.691167 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.691218 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.691234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.691253 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.691269 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.794796 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.794874 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.794897 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.794922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.794942 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.891416 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:14 crc kubenswrapper[5010]: E1126 15:27:14.891590 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.891659 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.891675 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:14 crc kubenswrapper[5010]: E1126 15:27:14.892025 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:14 crc kubenswrapper[5010]: E1126 15:27:14.892094 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.898302 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.898349 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.898366 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.898389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:14 crc kubenswrapper[5010]: I1126 15:27:14.898406 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:14Z","lastTransitionTime":"2025-11-26T15:27:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.000622 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.001019 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.001185 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.001328 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.001459 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.105649 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.105731 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.105750 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.105773 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.105793 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.209213 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.209275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.209294 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.209321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.209343 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.313038 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.313100 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.313113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.313130 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.313143 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.416206 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.416277 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.416298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.416325 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.416344 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.520179 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.520257 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.520275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.520301 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.520319 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.623484 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.623549 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.623570 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.623596 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.623614 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.726589 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.726957 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.727112 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.727224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.727321 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.830556 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.830615 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.830632 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.830663 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.830681 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.891123 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:15 crc kubenswrapper[5010]: E1126 15:27:15.891375 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.933879 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.934267 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.934426 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.934584 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:15 crc kubenswrapper[5010]: I1126 15:27:15.934768 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:15Z","lastTransitionTime":"2025-11-26T15:27:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.038381 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.038475 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.038496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.038523 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.038542 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.145933 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.146002 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.146021 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.146047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.146065 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.249181 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.249254 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.249272 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.249298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.249317 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.352988 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.353043 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.353060 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.353120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.353141 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.456335 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.456400 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.456420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.456446 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.456464 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.559287 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.559360 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.559379 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.559404 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.559421 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.662702 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.662804 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.662821 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.662846 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.662864 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.766889 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.766960 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.766980 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.767012 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.767036 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.870167 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.870217 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.870234 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.870258 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.870275 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.891143 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.891182 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.891182 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:16 crc kubenswrapper[5010]: E1126 15:27:16.891348 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:16 crc kubenswrapper[5010]: E1126 15:27:16.891492 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:16 crc kubenswrapper[5010]: E1126 15:27:16.891581 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.973980 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.974041 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.974053 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.974089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:16 crc kubenswrapper[5010]: I1126 15:27:16.974102 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:16Z","lastTransitionTime":"2025-11-26T15:27:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.077121 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.077213 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.077238 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.077269 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.077297 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.179915 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.179974 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.179990 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.180014 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.180032 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.199986 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.200047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.200066 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.200089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.200105 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.222405 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:17Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.228146 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.228213 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.228235 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.228266 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.228287 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.248580 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:17Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.254571 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.254627 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.254645 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.254671 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.254692 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.274778 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:17Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.280330 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.280390 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.280414 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.280440 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.280462 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.301890 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:17Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.306995 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.307051 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.307075 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.307104 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.307124 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.330885 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:17Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.331519 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.333635 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.333874 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.334038 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.334183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.334320 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.436882 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.436931 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.436951 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.436974 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.436992 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.540568 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.540660 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.540743 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.540814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.540837 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.643934 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.644059 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.644079 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.644104 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.644121 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.747938 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.748091 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.748114 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.748139 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.748160 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.852325 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.852418 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.852444 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.852476 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.852503 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.891297 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:17 crc kubenswrapper[5010]: E1126 15:27:17.891856 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.955743 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.955826 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.955846 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.955873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:17 crc kubenswrapper[5010]: I1126 15:27:17.955893 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:17Z","lastTransitionTime":"2025-11-26T15:27:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.059497 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.059556 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.059579 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.059606 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.059629 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.163078 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.163137 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.163162 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.163188 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.163208 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.266199 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.266245 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.266258 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.266274 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.266286 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.368692 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.368772 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.368785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.368804 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.368820 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.472448 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.472489 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.472501 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.472519 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.472533 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.576222 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.576279 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.576297 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.576320 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.576338 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.679270 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.679341 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.679353 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.679372 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.679383 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.783286 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.783383 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.783417 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.783453 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.783478 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.886496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.886547 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.886563 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.886589 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.886607 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.890673 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.890696 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.890765 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:18 crc kubenswrapper[5010]: E1126 15:27:18.890877 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:18 crc kubenswrapper[5010]: E1126 15:27:18.891016 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:18 crc kubenswrapper[5010]: E1126 15:27:18.891040 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.990299 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.990363 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.990377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.990408 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:18 crc kubenswrapper[5010]: I1126 15:27:18.990424 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:18Z","lastTransitionTime":"2025-11-26T15:27:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.093972 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.094031 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.094051 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.094079 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.094098 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.197150 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.197205 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.197223 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.197250 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.197272 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.301267 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.301333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.301354 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.301381 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.301402 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.404423 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.404517 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.404542 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.404573 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.404593 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.508129 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.508198 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.508217 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.508239 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.508255 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.611440 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.611509 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.611527 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.611555 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.611574 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.714651 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.714759 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.714790 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.714822 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.714872 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.816962 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.817045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.817066 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.817093 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.817113 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.890838 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:19 crc kubenswrapper[5010]: E1126 15:27:19.891133 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.910910 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:19Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.921462 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.921540 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.921565 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.921598 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.921624 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:19Z","lastTransitionTime":"2025-11-26T15:27:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.933778 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:19Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.956481 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:19Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:19 crc kubenswrapper[5010]: I1126 15:27:19.978595 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:19Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.003664 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.024884 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.024992 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.025045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.025077 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.025129 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.026087 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.048511 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mo
untPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.076167 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6de
e08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\
"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.095860 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43
Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.116005 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.128048 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.128136 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.128220 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.128391 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.128569 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.138318 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.156537 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.191427 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da6221
4f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.209026 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.234266 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.234344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.234361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.234388 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.234406 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.246502 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.270891 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.289160 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.311422 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:20Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.337291 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.337359 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.337379 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.337411 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.337431 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.440978 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.441040 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.441060 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.441089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.441110 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.544619 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.544680 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.544698 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.544751 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.544773 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.648421 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.648483 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.648500 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.648527 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.648546 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.752790 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.752873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.752893 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.752926 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.752949 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.858986 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.859048 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.859067 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.859099 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.859119 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.890921 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.891004 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.891034 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:20 crc kubenswrapper[5010]: E1126 15:27:20.891149 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:20 crc kubenswrapper[5010]: E1126 15:27:20.891377 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:20 crc kubenswrapper[5010]: E1126 15:27:20.891955 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.963825 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.963914 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.963939 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.963974 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:20 crc kubenswrapper[5010]: I1126 15:27:20.963998 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:20Z","lastTransitionTime":"2025-11-26T15:27:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.068108 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.068214 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.068283 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.068323 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.068396 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.172766 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.172859 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.172890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.172929 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.172951 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.277153 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.277208 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.277224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.277244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.277258 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.380276 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.380337 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.380356 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.380386 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.380409 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.483748 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.483827 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.483838 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.483860 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.483872 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.586407 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.586457 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.586473 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.586496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.586513 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.689814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.689946 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.689967 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.690052 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.690087 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.794519 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.794598 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.794618 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.794667 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.794688 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.890863 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:21 crc kubenswrapper[5010]: E1126 15:27:21.891136 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.898874 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.898942 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.898962 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.898991 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:21 crc kubenswrapper[5010]: I1126 15:27:21.899013 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:21Z","lastTransitionTime":"2025-11-26T15:27:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.003455 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.003546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.003572 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.003613 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.003642 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.107127 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.107203 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.107228 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.107257 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.107281 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.210979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.211020 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.211031 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.211049 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.211062 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.314907 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.314972 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.314991 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.315021 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.315042 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.419052 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.419100 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.419116 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.419142 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.419159 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.522262 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.522328 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.522346 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.522376 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.522397 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.625884 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.625958 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.625983 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.626015 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.626037 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.729293 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.729350 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.729365 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.729389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.729405 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.832888 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.832955 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.832974 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.833005 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.833025 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.891238 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:22 crc kubenswrapper[5010]: E1126 15:27:22.891483 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.891754 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:22 crc kubenswrapper[5010]: E1126 15:27:22.891822 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.891975 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:22 crc kubenswrapper[5010]: E1126 15:27:22.892038 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.936201 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.936235 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.936243 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.936261 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:22 crc kubenswrapper[5010]: I1126 15:27:22.936273 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:22Z","lastTransitionTime":"2025-11-26T15:27:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.042261 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.042315 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.042334 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.042361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.042379 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.145870 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.145959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.145978 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.146403 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.146451 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.249801 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.249861 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.249880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.249911 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.249933 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.353740 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.353797 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.353814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.353841 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.353858 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.457898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.457959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.457980 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.458007 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.458027 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.560737 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.560806 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.560823 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.560849 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.560877 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.664482 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.664557 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.664577 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.664612 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.664633 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.768355 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.768428 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.768451 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.768484 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.768510 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.871374 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.871440 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.871460 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.871485 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.871507 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.891078 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:23 crc kubenswrapper[5010]: E1126 15:27:23.891278 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.975127 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.975201 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.975224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.975258 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:23 crc kubenswrapper[5010]: I1126 15:27:23.975285 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:23Z","lastTransitionTime":"2025-11-26T15:27:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.079529 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.079623 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.079645 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.079677 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.079697 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.183001 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.183120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.183141 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.183175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.183195 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.285633 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.285736 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.285758 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.285794 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.285818 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.389498 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.389580 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.389608 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.389641 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.389667 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.493643 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.493701 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.493749 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.493779 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.493800 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.596643 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.596683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.596696 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.596736 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.596749 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.700406 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.700471 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.700491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.700575 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.700596 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.804907 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.804981 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.805000 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.805031 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.805055 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.891373 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.891444 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:24 crc kubenswrapper[5010]: E1126 15:27:24.891521 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:24 crc kubenswrapper[5010]: E1126 15:27:24.891672 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.891964 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:24 crc kubenswrapper[5010]: E1126 15:27:24.892370 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.908336 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.908380 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.908396 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.908413 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:24 crc kubenswrapper[5010]: I1126 15:27:24.908428 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:24Z","lastTransitionTime":"2025-11-26T15:27:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.012445 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.012516 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.012532 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.012555 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.012573 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.116252 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.117003 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.117073 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.117189 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.117266 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.222005 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.222355 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.222420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.222520 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.222582 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.325593 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.325645 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.325660 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.325682 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.325698 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.428634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.428980 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.429111 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.429210 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.429304 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.534320 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.534420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.534451 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.534491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.534521 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.639362 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.639445 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.639469 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.639504 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.639530 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.743196 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.743255 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.743273 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.743299 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.743322 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.846673 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.846748 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.846766 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.846785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.846800 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.891183 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:25 crc kubenswrapper[5010]: E1126 15:27:25.891978 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.892166 5010 scope.go:117] "RemoveContainer" containerID="26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3" Nov 26 15:27:25 crc kubenswrapper[5010]: E1126 15:27:25.892527 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.950336 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.950391 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.950412 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.950435 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:25 crc kubenswrapper[5010]: I1126 15:27:25.950455 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:25Z","lastTransitionTime":"2025-11-26T15:27:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.053457 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.053529 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.053549 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.053577 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.053594 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.156375 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.156426 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.156436 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.156455 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.156465 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.260392 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.260449 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.260466 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.260491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.260509 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.362802 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.362879 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.362898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.362929 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.362953 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.466108 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.466168 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.466184 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.466206 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.466223 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.569274 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.569345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.569381 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.569409 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.569426 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.672220 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.672311 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.672333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.672361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.672383 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.774964 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.775012 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.775030 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.775054 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.775067 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.877666 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.877731 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.877744 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.877763 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.877802 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.891306 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.891341 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:26 crc kubenswrapper[5010]: E1126 15:27:26.891456 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.891693 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:26 crc kubenswrapper[5010]: E1126 15:27:26.891842 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:26 crc kubenswrapper[5010]: E1126 15:27:26.892017 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.980305 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.980352 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.980365 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.980389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:26 crc kubenswrapper[5010]: I1126 15:27:26.980401 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:26Z","lastTransitionTime":"2025-11-26T15:27:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.083146 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.083202 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.083214 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.083236 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.083249 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.185936 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.185998 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.186009 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.186030 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.186045 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.288878 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.289235 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.289330 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.289416 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.289502 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.393336 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.393400 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.393418 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.393443 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.393462 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.496340 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.496398 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.496412 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.496431 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.496445 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.558423 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.558691 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.558908 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.559113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.559344 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.580532 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:27Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.586557 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.586827 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.587103 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.587308 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.587499 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.604360 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:27Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.610503 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.610548 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.610558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.610581 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.610594 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.629576 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:27Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.634644 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.634687 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.634705 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.634751 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.634767 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.651214 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:27Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.656816 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.656856 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.656873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.656896 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.656913 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.676135 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:27Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.676359 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.678823 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.678864 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.678881 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.678902 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.678918 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.770058 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.770341 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.770779 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:27:59.770749482 +0000 UTC m=+100.561466670 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.782890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.782939 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.782958 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.782986 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.783006 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.886629 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.886693 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.886705 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.886745 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.886764 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.891667 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:27 crc kubenswrapper[5010]: E1126 15:27:27.892325 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.989323 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.989372 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.989384 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.989404 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:27 crc kubenswrapper[5010]: I1126 15:27:27.989418 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:27Z","lastTransitionTime":"2025-11-26T15:27:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.091814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.091863 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.091873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.091887 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.091900 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.195438 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.196337 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.196512 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.196657 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.196818 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.300155 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.300222 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.300241 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.300280 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.300301 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.403357 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.403456 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.403469 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.403494 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.403507 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.506765 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.506834 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.506846 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.506869 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.506882 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.609467 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.609526 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.609558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.609574 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.609583 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.713105 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.713158 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.713171 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.713194 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.713208 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.816389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.816794 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.816858 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.816954 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.817060 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.892179 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.892175 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:28 crc kubenswrapper[5010]: E1126 15:27:28.892358 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:28 crc kubenswrapper[5010]: E1126 15:27:28.892513 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.892855 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:28 crc kubenswrapper[5010]: E1126 15:27:28.893089 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.921462 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.921511 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.921527 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.921549 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:28 crc kubenswrapper[5010]: I1126 15:27:28.921564 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:28Z","lastTransitionTime":"2025-11-26T15:27:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.024834 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.024890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.024906 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.024929 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.024947 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.130157 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.130545 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.130629 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.130760 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.130840 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.234242 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.234290 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.234300 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.234318 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.234328 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.337283 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.337338 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.337351 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.337368 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.337378 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.376091 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/0.log" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.376147 5010 generic.go:334] "Generic (PLEG): container finished" podID="0a5a476f-6c13-4c62-8042-d9b37846aa18" containerID="263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad" exitCode=1 Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.376179 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerDied","Data":"263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.376562 5010 scope.go:117] "RemoveContainer" containerID="263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.399626 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.419781 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.433939 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recu
rsiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3
928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.440283 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.440313 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.440323 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.440339 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.440351 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.445313 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168
.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.465883 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.481891 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.500987 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.523495 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.537568 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.543076 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.543120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.543134 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.543154 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.543172 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.557095 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.572963 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.587428 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.604662 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.617969 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.632412 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.646117 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.646183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.646202 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.646233 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.646257 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.652877 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.668966 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.686390 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.749052 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.749149 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.749169 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.749227 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.749248 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.852343 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.852401 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.852418 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.852441 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.852460 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.891321 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:29 crc kubenswrapper[5010]: E1126 15:27:29.891498 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.905600 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.926886 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:29Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:29Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.947520 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.954614 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.954647 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:29 crc 
kubenswrapper[5010]: I1126 15:27:29.954657 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.954674 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.954686 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:29Z","lastTransitionTime":"2025-11-26T15:27:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.963661 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 
26 15:27:29 crc kubenswrapper[5010]: I1126 15:27:29.986459 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:29Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.004905 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.017480 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.037254 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.049687 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.057224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.057260 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.057271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 
15:27:30.057291 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.057305 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.063745 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.080025 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.092233 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.113642 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da6221
4f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.130128 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.144529 5010 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c06274
8df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.157017 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.160063 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.160101 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.160114 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.160131 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.160141 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.169426 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.186522 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.262733 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.262783 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.262794 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.262810 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.262821 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.365937 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.365983 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.365994 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.366012 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.366022 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.383227 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/0.log" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.383373 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerStarted","Data":"fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.397650 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.415262 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.432578 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.444794 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.459627 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.468855 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.468909 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.468928 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.468953 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.468972 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.477047 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.495849 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.516732 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.533860 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.551881 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.572174 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.572233 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.572255 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.572282 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.572301 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.572939 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.591766 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.623526 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.646584 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"i
mage\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.667334 5010 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c06274
8df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.676176 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.676259 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.676275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.676294 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.676307 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.684929 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.701530 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.721782 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:30Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.779817 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.779866 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.779880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.779900 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.779916 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.888042 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.888368 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.888486 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.888544 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.888572 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.891385 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.891631 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.891696 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:30 crc kubenswrapper[5010]: E1126 15:27:30.891885 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:30 crc kubenswrapper[5010]: E1126 15:27:30.892296 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:30 crc kubenswrapper[5010]: E1126 15:27:30.892458 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.992208 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.992500 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.992871 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.993016 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:30 crc kubenswrapper[5010]: I1126 15:27:30.993153 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:30Z","lastTransitionTime":"2025-11-26T15:27:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.097235 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.097448 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.097581 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.097809 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.097953 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.201236 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.201303 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.201324 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.201348 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.201368 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.305125 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.305189 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.305203 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.305230 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.305247 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.407260 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.407304 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.407319 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.407355 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.407368 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.511017 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.511064 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.511081 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.511107 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.511122 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.613379 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.613469 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.613490 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.613521 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.613547 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.716496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.716601 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.716626 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.716662 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.716691 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.819959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.820014 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.820023 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.820041 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.820052 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.891849 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:31 crc kubenswrapper[5010]: E1126 15:27:31.892021 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.922796 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.922873 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.922892 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.922921 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:31 crc kubenswrapper[5010]: I1126 15:27:31.922944 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:31Z","lastTransitionTime":"2025-11-26T15:27:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.026244 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.026313 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.026330 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.026358 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.026377 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.129621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.129683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.129702 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.129768 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.129803 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.232660 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.232756 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.232771 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.232801 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.232821 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.336175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.336253 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.336278 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.336371 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.336397 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.441099 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.441166 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.441183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.441213 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.441234 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.544776 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.544880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.544902 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.544935 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.544956 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.648789 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.648880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.648898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.648926 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.648947 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.752624 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.752672 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.752691 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.752730 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.752743 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.855979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.856059 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.856078 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.856109 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.856132 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.890690 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.890782 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.890870 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:32 crc kubenswrapper[5010]: E1126 15:27:32.890934 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:32 crc kubenswrapper[5010]: E1126 15:27:32.891210 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:32 crc kubenswrapper[5010]: E1126 15:27:32.891316 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.959496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.959572 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.959597 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.959635 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:32 crc kubenswrapper[5010]: I1126 15:27:32.959669 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:32Z","lastTransitionTime":"2025-11-26T15:27:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.062606 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.062650 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.062664 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.062688 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.062704 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.166634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.166693 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.166740 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.166769 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.166785 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.270753 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.270835 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.270852 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.270878 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.270894 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.374438 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.374514 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.374532 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.374562 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.374586 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.477698 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.477770 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.477790 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.477813 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.477829 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.581418 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.581463 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.581474 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.581491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.581505 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.684266 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.684298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.684309 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.684320 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.684329 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.787826 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.787906 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.787934 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.787973 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.788008 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.890968 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:33 crc kubenswrapper[5010]: E1126 15:27:33.891146 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.891593 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.891673 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.891695 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.892084 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.892387 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.906791 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.995223 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.995273 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.995291 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.995312 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:33 crc kubenswrapper[5010]: I1126 15:27:33.995332 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:33Z","lastTransitionTime":"2025-11-26T15:27:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.101856 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.101922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.101943 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.101970 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.101990 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.205572 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.205638 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.205651 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.205687 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.205704 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.309748 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.309784 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.309795 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.309818 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.309831 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.412365 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.412422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.412440 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.412461 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.412480 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.515943 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.516011 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.516032 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.516066 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.516090 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.620027 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.620085 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.620103 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.620131 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.620154 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.725838 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.725892 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.725910 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.725937 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.725988 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.829878 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.829933 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.829950 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.829977 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.829998 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.890889 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.890965 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.890902 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:34 crc kubenswrapper[5010]: E1126 15:27:34.891101 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:34 crc kubenswrapper[5010]: E1126 15:27:34.891236 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:34 crc kubenswrapper[5010]: E1126 15:27:34.891395 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.933433 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.933489 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.933615 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.933862 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:34 crc kubenswrapper[5010]: I1126 15:27:34.934057 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:34Z","lastTransitionTime":"2025-11-26T15:27:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.037277 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.037326 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.037342 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.037365 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.037382 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.142045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.142115 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.142135 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.142163 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.142183 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.245796 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.245860 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.245877 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.245903 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.245926 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.348996 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.349047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.349067 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.349090 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.349106 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.452664 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.452783 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.452812 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.452844 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.452872 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.556461 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.556692 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.556742 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.556766 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.556787 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.659878 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.659938 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.659955 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.659979 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.659997 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.762759 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.762840 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.762868 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.762899 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.762923 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.865679 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.865771 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.865788 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.865812 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.865851 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.892105 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:35 crc kubenswrapper[5010]: E1126 15:27:35.892318 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.968599 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.968661 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.968679 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.968705 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:35 crc kubenswrapper[5010]: I1126 15:27:35.968754 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:35Z","lastTransitionTime":"2025-11-26T15:27:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.072491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.072549 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.072566 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.072590 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.072608 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.175416 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.175525 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.175546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.175571 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.175591 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.278803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.278849 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.278866 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.278889 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.278911 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.382316 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.382387 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.382405 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.382432 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.382451 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.486757 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.486823 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.486840 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.486868 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.486886 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.591062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.591186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.591205 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.591232 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.591251 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.694498 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.694565 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.694589 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.694620 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.694642 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.798053 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.798113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.798132 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.798157 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.798176 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.890988 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.891015 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:36 crc kubenswrapper[5010]: E1126 15:27:36.891237 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.891330 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:36 crc kubenswrapper[5010]: E1126 15:27:36.891339 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:36 crc kubenswrapper[5010]: E1126 15:27:36.891694 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.901522 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.901558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.901570 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.901588 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:36 crc kubenswrapper[5010]: I1126 15:27:36.901601 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:36Z","lastTransitionTime":"2025-11-26T15:27:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.005208 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.005296 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.005315 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.005344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.005363 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.108749 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.108809 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.108827 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.108851 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.108872 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.210973 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.211074 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.211093 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.211120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.211138 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.314283 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.314340 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.314358 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.314383 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.314401 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.417980 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.418045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.418062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.418087 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.418147 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.521296 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.521372 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.521396 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.521422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.521440 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.625249 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.625290 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.625298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.625314 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.625326 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.728943 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.729018 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.729038 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.729064 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.729083 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.832951 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.833003 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.833021 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.833045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.833062 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.890843 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:37 crc kubenswrapper[5010]: E1126 15:27:37.891094 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.897886 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.897961 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.897982 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.898003 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.898021 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: E1126 15:27:37.921148 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:37Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.927221 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.927294 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.927312 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.927338 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.927356 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: E1126 15:27:37.949381 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:37Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.959509 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.959580 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.959766 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.959805 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.959829 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:37 crc kubenswrapper[5010]: E1126 15:27:37.982258 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:37Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.988415 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.988539 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.988559 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.988613 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:37 crc kubenswrapper[5010]: I1126 15:27:37.988632 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:37Z","lastTransitionTime":"2025-11-26T15:27:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: E1126 15:27:38.010867 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:37Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:38Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.018449 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.018513 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.018535 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.018562 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.018582 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: E1126 15:27:38.038298 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:38Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:38 crc kubenswrapper[5010]: E1126 15:27:38.038562 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.041224 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.041302 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.041319 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.041342 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.041361 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.145013 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.145080 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.145098 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.145122 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.145145 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.248027 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.248096 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.248118 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.248142 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.248161 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.351792 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.351889 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.351945 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.351973 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.351991 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.454920 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.454996 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.455018 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.455047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.455070 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.559039 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.559286 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.559317 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.559350 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.559374 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.663100 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.663164 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.663182 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.663207 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.663229 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.767210 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.767270 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.767288 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.767312 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.767330 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.871157 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.871245 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.871264 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.871288 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.871309 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.891153 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.891224 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.891230 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:38 crc kubenswrapper[5010]: E1126 15:27:38.891391 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:38 crc kubenswrapper[5010]: E1126 15:27:38.891570 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:38 crc kubenswrapper[5010]: E1126 15:27:38.891815 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.976057 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.976121 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.976141 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.976164 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:38 crc kubenswrapper[5010]: I1126 15:27:38.976183 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:38Z","lastTransitionTime":"2025-11-26T15:27:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.079621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.079763 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.079783 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.079806 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.079823 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.183177 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.183278 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.183327 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.183356 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.183374 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.287097 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.287171 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.287196 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.287226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.287250 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.391288 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.391357 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.391382 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.391410 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.391431 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.494683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.494770 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.494792 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.494819 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.494836 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.598043 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.598098 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.598116 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.598140 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.598157 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.702037 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.702102 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.702120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.702144 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.702165 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.805511 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.805614 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.805632 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.805659 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.805686 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.891083 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:39 crc kubenswrapper[5010]: E1126 15:27:39.891376 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.892760 5010 scope.go:117] "RemoveContainer" containerID="26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.909069 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.909886 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.909924 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.909957 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.909982 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:39Z","lastTransitionTime":"2025-11-26T15:27:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.912603 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:39Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.934007 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:39Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.957337 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:39Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:39 crc kubenswrapper[5010]: I1126 15:27:39.976403 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:39Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.001298 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:39Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.016453 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.016486 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.016496 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.016512 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.016520 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.021240 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.036420 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.071853 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.090588 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-
release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.118933 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.119226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.119310 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.119338 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.119374 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.119399 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.136948 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.149370 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.173689 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.187964 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"388019d9-6c4b-41f6-b190-a2748de19329\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://942d958bdc0e38ec0d1362ab378f5623b67782d05dce2a4cc4fcc0a41220636f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9
d2e12e73e6e645a256\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.211189 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4
f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.224753 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.224814 
5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.224829 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.224851 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.224865 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.227353 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"nam
e\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.240080 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.254199 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.270639 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.328676 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.328753 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.328776 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.328804 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.328825 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.427535 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/2.log"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.432384 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.432430 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.432444 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.432467 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.432482 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.433520 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.434374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.445216 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"388019d9-6c4b-41f6-b190-a2748de19329\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://942d958bdc0e38ec0d1362ab378f5623b67782d05dce2a4cc4fcc0a41220636f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\"
:\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.458211 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117
ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.467589 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.477825 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.487911 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.497956 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.512059 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.523509 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.534915 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.534947 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.534958 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.534976 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.535063 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.545702 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.562187 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.575549 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.590021 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.609022 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\
",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"
state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.620544 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.633213 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.637491 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.637542 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.637555 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.637573 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.637586 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.646489 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.657424 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.679741 5010 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: 
fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.691737 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:40Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.740097 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.740151 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.740175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.740194 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.740206 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.845883 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.845962 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.845983 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.846011 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.846029 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.890892 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.890957 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:40 crc kubenswrapper[5010]: E1126 15:27:40.891031 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:40 crc kubenswrapper[5010]: E1126 15:27:40.891137 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.891209 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:40 crc kubenswrapper[5010]: E1126 15:27:40.891293 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.948880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.948930 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.948947 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.948973 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:40 crc kubenswrapper[5010]: I1126 15:27:40.948994 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:40Z","lastTransitionTime":"2025-11-26T15:27:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.052393 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.052463 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.052481 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.052505 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.052521 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.155558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.155634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.155660 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.155692 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.155761 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.259481 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.259540 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.259558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.259584 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.259602 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.362788 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.362868 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.362887 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.362919 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.362938 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.439914 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/3.log" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.441210 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/2.log" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.445691 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" exitCode=1 Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.445777 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.445844 5010 scope.go:117] "RemoveContainer" containerID="26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.447381 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:27:41 crc kubenswrapper[5010]: E1126 15:27:41.448164 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.465605 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.465664 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.465684 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.465741 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.465770 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.474042 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.496031 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.523318 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.540125 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.558169 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.568919 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.568970 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.568987 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.569016 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.569036 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.577202 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.612327 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://26653abb8b7736f171e662dc850d6095f5da62214f8c7c1815ff52849df173a3\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:10Z\\\",\\\"message\\\":\\\"Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 15:27:10.866937 6700 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]} options:{GoMap:map[iface-id-ver:9d751cbb-f2e2-430d-9754-c882a5e924a5 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:3b 10.217.0.59]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {960d98b2-dc64-4e93-a4b6-9b19847af71e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1126 15:27:10.866960 6700 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: fa\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:10Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:40Z\\\",\\\"message\\\":\\\"openshift-machine-config-operator/machine-config-daemon-kt7rg in node crc\\\\nI1126 15:27:40.846816 7094 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-kt7rg after 0 failed attempt(s)\\\\nI1126 15:27:40.846832 7094 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:27:40.846848 7094 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-kt7rg\\\\nI1126 15:27:40.846850 7094 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:27:40.846840 7094 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 15:27:40.846866 7094 obj_retry.go:420] Function 
iterateRetryResources for *v1.Pod ended (in 2.104674ms)\\\\nI1126 15:27:40.846911 7094 factory.go:656] Stopping watch factory\\\\nI1126 15:27:40.846946 7094 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 15:27:40.847100 7094 ovnkube.go:599] Stopped ovnkube\\\\nI1126 15:27:40.847138 7094 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 15:27:40.847234 7094 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath
\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.629823 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.663007 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f568
72a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.671668 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.671743 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.671761 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.671785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.671804 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.683314 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.703213 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.722961 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.739961 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.757337 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts
\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.772741 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"388019d9-6c4b-41f6-b190-a2748de19329\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://942d958bdc0e38ec0d1362ab378f5623b67782d05dce2a4cc4fcc0a41220636f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.775165 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.775223 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.775280 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.775311 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.775368 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.794806 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.814127 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.832231 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.849231 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:41Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.878628 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.878702 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.878759 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.878790 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.878813 5010 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.891342 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:41 crc kubenswrapper[5010]: E1126 15:27:41.891579 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.982691 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.982783 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.982801 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.982827 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:41 crc kubenswrapper[5010]: I1126 15:27:41.982846 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:41Z","lastTransitionTime":"2025-11-26T15:27:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.085536 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.085580 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.085592 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.085609 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.085621 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.188183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.188235 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.188251 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.188273 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.188290 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.291384 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.291444 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.291462 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.291485 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.291502 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.394014 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.394096 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.394122 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.394154 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.394179 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.452479 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/3.log" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.459028 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:27:42 crc kubenswrapper[5010]: E1126 15:27:42.459340 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.475768 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"388019d9-6c4b-41f6-b190-a2748de19329\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://942d958bdc0e38ec0d1362ab378f5623b67782d05dce2a4cc4fcc0a41220636f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"exitCode\\\":0,\
\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.499888 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.499948 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.499969 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.499994 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.500012 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.500481 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.519959 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.537565 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.553198 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.570204 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.591997 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.602831 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.602923 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.602942 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.602968 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.602987 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.616202 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.643435 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.660005 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.681264 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.701539 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.706755 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.706858 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.706885 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.706914 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.706971 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.735690 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.755103 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.774983 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.798366 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.812127 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.812465 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.812498 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.812520 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.812692 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.820639 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.852866 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:40Z\\\",\\\"message\\\":\\\"openshift-machine-config-operator/machine-config-daemon-kt7rg in node crc\\\\nI1126 15:27:40.846816 7094 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-kt7rg after 0 failed attempt(s)\\\\nI1126 15:27:40.846832 7094 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:27:40.846848 7094 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-kt7rg\\\\nI1126 15:27:40.846850 7094 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:27:40.846840 7094 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 15:27:40.846866 7094 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 2.104674ms)\\\\nI1126 15:27:40.846911 7094 factory.go:656] Stopping watch factory\\\\nI1126 15:27:40.846946 7094 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 15:27:40.847100 7094 ovnkube.go:599] Stopped ovnkube\\\\nI1126 15:27:40.847138 7094 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 15:27:40.847234 7094 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create 
a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.868470 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:42Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.891430 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.891447 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.891515 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:42 crc kubenswrapper[5010]: E1126 15:27:42.891595 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:42 crc kubenswrapper[5010]: E1126 15:27:42.892042 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:42 crc kubenswrapper[5010]: E1126 15:27:42.892178 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.915624 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.915681 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.915705 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.915814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:42 crc kubenswrapper[5010]: I1126 15:27:42.915842 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:42Z","lastTransitionTime":"2025-11-26T15:27:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.019242 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.019289 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.019300 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.019315 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.019325 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.122733 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.122784 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.122795 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.122815 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.122827 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.225795 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.225845 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.225862 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.225886 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.225904 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.329325 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.329389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.329406 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.329434 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.329453 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.432577 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.432647 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.432666 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.432694 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.432742 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.535495 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.535571 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.535596 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.535623 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.535645 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.638137 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.638198 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.638222 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.638252 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.638274 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.741047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.741113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.741127 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.741145 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.741157 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.844744 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.844810 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.844827 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.844851 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.844871 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.890890 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:43 crc kubenswrapper[5010]: E1126 15:27:43.891150 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.947268 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.947338 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.947361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.947391 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:43 crc kubenswrapper[5010]: I1126 15:27:43.947417 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:43Z","lastTransitionTime":"2025-11-26T15:27:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.050990 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.051056 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.051074 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.051099 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.051119 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.155026 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.155076 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.155098 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.155129 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.155150 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.258809 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.258890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.258910 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.258942 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.258963 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.362031 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.362099 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.362116 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.362140 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.362158 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.473437 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.473475 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.473486 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.473504 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.473516 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.576541 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.576594 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.576610 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.576633 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.576651 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.680624 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.680683 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.680703 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.680765 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.680845 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.699383 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.699541 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.699506115 +0000 UTC m=+149.490223293 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.699628 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.699773 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.699915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.699957 5010 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700558 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.700538201 +0000 UTC m=+149.491255409 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700123 5010 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700194 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700695 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.700669765 +0000 UTC m=+149.491386983 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700759 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700793 5010 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.700889 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.70086144 +0000 UTC m=+149.491578648 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.784877 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.785240 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.785260 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.785284 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.785305 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.802190 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.802473 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.802518 5010 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.802541 5010 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.802621 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.802598003 +0000 UTC m=+149.593315191 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.888050 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.888148 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.888173 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.888202 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.888228 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.890863 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.890868 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.891124 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.891016 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.891265 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:44 crc kubenswrapper[5010]: E1126 15:27:44.891342 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.991333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.991395 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.991414 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.991441 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:44 crc kubenswrapper[5010]: I1126 15:27:44.991461 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:44Z","lastTransitionTime":"2025-11-26T15:27:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.095526 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.095583 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.095601 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.096030 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.096083 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.202215 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.202256 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.202272 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.202294 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.202312 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.305532 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.305601 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.305621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.305648 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.305666 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.409148 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.409492 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.409625 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.409843 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.409981 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.512759 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.512813 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.512828 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.512852 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.512869 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.616029 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.616546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.616838 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.617091 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.617276 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.720461 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.720538 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.720558 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.720582 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.720599 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.824129 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.824186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.824203 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.824228 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.824248 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.891509 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:45 crc kubenswrapper[5010]: E1126 15:27:45.891753 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.927554 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.927623 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.927649 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.927681 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:45 crc kubenswrapper[5010]: I1126 15:27:45.927705 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:45Z","lastTransitionTime":"2025-11-26T15:27:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.031256 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.031335 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.031358 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.031387 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.031410 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.134693 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.134785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.134803 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.134832 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.134851 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.238434 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.238506 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.238533 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.238565 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.238592 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.342248 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.342299 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.342311 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.342329 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.342343 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.445892 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.445957 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.445975 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.446001 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.446019 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.548898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.549031 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.549050 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.549160 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.549197 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.652238 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.652304 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.652321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.652345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.652364 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.756090 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.756162 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.756189 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.756222 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.756244 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.858851 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.858919 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.858943 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.858977 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.859006 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.891602 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.891652 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.891790 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:46 crc kubenswrapper[5010]: E1126 15:27:46.891950 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:46 crc kubenswrapper[5010]: E1126 15:27:46.892359 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:46 crc kubenswrapper[5010]: E1126 15:27:46.892558 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.961966 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.962057 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.962081 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.962113 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:46 crc kubenswrapper[5010]: I1126 15:27:46.962140 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:46Z","lastTransitionTime":"2025-11-26T15:27:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.065472 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.065520 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.065531 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.065545 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.065556 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.168420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.168459 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.168468 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.168482 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.168492 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.271059 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.271093 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.271101 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.271115 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.271124 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.374108 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.374175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.374198 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.374227 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.374244 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.477226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.477275 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.477309 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.477330 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.477343 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.579394 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.579447 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.579464 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.579488 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.579507 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.682645 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.682693 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.682734 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.682752 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.682764 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.785195 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.785242 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.785254 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.785271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.785284 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.888107 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.888195 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.888220 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.888254 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.888281 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.891394 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:47 crc kubenswrapper[5010]: E1126 15:27:47.891604 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.991917 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.991976 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.991994 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.992021 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:47 crc kubenswrapper[5010]: I1126 15:27:47.992040 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:47Z","lastTransitionTime":"2025-11-26T15:27:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.095798 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.095870 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.095890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.095916 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.095935 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.199059 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.199120 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.199136 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.199163 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.199182 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.302298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.302362 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.302380 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.302404 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.302421 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.400813 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.400880 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.400898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.400924 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.400941 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.422241 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.427504 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.427546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.427669 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.427757 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.427795 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.446331 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.450735 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.450776 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.450788 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.450808 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.450822 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.462861 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.467546 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.467608 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.467628 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.467656 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.467676 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.488397 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.492124 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.492162 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.492175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.492191 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.492203 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.503832 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:48Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.503982 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.505465 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.505494 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.505507 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.505523 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.505535 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.608636 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.608694 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.608746 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.608779 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.608801 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.712395 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.712486 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.712512 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.712544 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.712570 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.816178 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.816260 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.816291 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.816321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.816342 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.891582 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.891631 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.891631 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.891863 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.891997 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:48 crc kubenswrapper[5010]: E1126 15:27:48.892144 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.919236 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.919298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.919321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.919349 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:48 crc kubenswrapper[5010]: I1126 15:27:48.919374 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:48Z","lastTransitionTime":"2025-11-26T15:27:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.022331 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.022396 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.022413 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.022439 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.022456 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.125333 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.125408 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.125425 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.125450 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.125467 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.228504 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.228572 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.228590 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.228616 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.228635 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.331316 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.331361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.331373 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.331393 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.331405 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.433649 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.433739 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.433767 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.433799 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.433822 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.536932 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.536998 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.537015 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.537037 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.537057 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.639848 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.639913 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.639930 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.639953 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.639973 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.744139 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.744203 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.744226 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.744256 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.744279 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.847670 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.847782 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.847801 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.847830 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.847849 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.890882 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:49 crc kubenswrapper[5010]: E1126 15:27:49.891099 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.915911 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://665628f3e9e6d9a91cda933503dc0d0c062bc0f14a199f51d61dba2f1ad1ba26\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.936384 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.951978 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.952028 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.952042 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.952062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.952074 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:49Z","lastTransitionTime":"2025-11-26T15:27:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.972664 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d0be9287-aff5-4cc9-8872-baff6f38904a\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a34caeeeeb827fb698883c6def165bb1473741bea7c3b78abc2b09d84ae10e7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d5fd6bd5ffe1edbfc4dc8dd4ad9c8a13938bfcf9b73ba2a82e4666833c836207\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3d5318329dbbd70175ac0ad90338cbf485749e4e8a1e202120873b3a64016ebc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9ccc97f1a1c6328c898937055c391361c81f56872a73fe2dadc870fc44b5c7dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c8391cbe2ea95f1d181116ed2e58b0db45c6de14219c89a4fde25ebe09b02ac3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://a3591cb9ec9670f82d31472359c4dc45449851c44ab12e6eae2254be668a363e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6630c8d4f2a78401b969efecdfa73775bb6f3cd3d54f4b45cd5c418008edb48d\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f1bd5b69fdb18b45bca1dd39e3faa9a4e8d4b43305928dab0b6e31e9e6868bfe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:49 crc kubenswrapper[5010]: I1126 15:27:49.992129 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a615ef53-f3d0-4f80-ac1f-e1909ad18b0b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cb4cf9eda117a1e43e1b6c21664b230959b985e180cd475249659e88d0a4fae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b0da5b3c1b34f348ff477e79bfe13214e8d876f44ed2a3a32bd5cd8e7bbb8d21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://52d7c92dc4124d6206fbc760b088dc8e3589b2bc1b2f472b9d981f477b2f1546\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1f37b133887832f51184bdafa680e1bcda0212b99842d74479a489d10eb1e54e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:49Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.015250 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.035289 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8d56986ad6e058ace10d60dd95bb58c11d939982cb78233745ace6a7aa7f5ce1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d79b7f461d048a56e9b6f662ce38f6665ba4cf536acb95bc166de67b5c01359b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.056772 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a6b0e322-9296-4356-9e3b-6497381eb30d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://094546975d7c867f5caeaf8ccb32deeaa5b78807a83aa1afc50902a590da4c2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qjf7z\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt7rg\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.058446 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.058684 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.058991 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.059936 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.060650 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.087221 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f10d9600-fac2-43e9-ad75-91b3c1f5b749\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:40Z\\\",\\\"message\\\":\\\"openshift-machine-config-operator/machine-config-daemon-kt7rg in node crc\\\\nI1126 15:27:40.846816 7094 obj_retry.go:386] Retry successful for *v1.Pod openshift-machine-config-operator/machine-config-daemon-kt7rg after 0 failed attempt(s)\\\\nI1126 15:27:40.846832 7094 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 15:27:40.846848 7094 default_network_controller.go:776] Recording success event on pod openshift-machine-config-operator/machine-config-daemon-kt7rg\\\\nI1126 15:27:40.846850 7094 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 15:27:40.846840 7094 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 15:27:40.846866 7094 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 2.104674ms)\\\\nI1126 15:27:40.846911 7094 factory.go:656] Stopping watch factory\\\\nI1126 15:27:40.846946 7094 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 15:27:40.847100 7094 ovnkube.go:599] Stopped ovnkube\\\\nI1126 15:27:40.847138 7094 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 15:27:40.847234 7094 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create a\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:27:40Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qxqgl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hlqt9\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.102969 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-df2ll" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fd9f5a65-e633-439f-8e8d-b760d20a3223\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2kh4k\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-df2ll\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.120272 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"388019d9-6c4b-41f6-b190-a2748de19329\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://942d958bdc0e38ec0d1362ab378f5623b67782d05dce2a4cc4fcc0a41220636f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://180b2986fae956e9fdccffd7a7a22a0887847abd31f4e9d2e12e73e6e645a256\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.141853 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"23c4a5fd-d711-43a1-95e5-db6d9016f440\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:53Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T15:26:39Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI1126 15:26:33.931222 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 15:26:33.932823 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-460706790/tls.crt::/tmp/serving-cert-460706790/tls.key\\\\\\\"\\\\nI1126 15:26:39.891232 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 15:26:39.895483 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 15:26:39.895512 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 15:26:39.895550 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 15:26:39.895561 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 15:26:39.904910 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 15:26:39.904942 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904953 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 15:26:39.904963 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 15:26:39.904970 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 15:26:39.904977 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 15:26:39.904983 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 15:26:39.905283 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 15:26:39.907766 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:23Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:19Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.160628 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"365a5dec-5977-4cdf-bf29-d9f6f8913947\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bd021596db112623a5ec3815dfc785f721c3196891ad7e1b5ca595a83220ff45\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8afe5fdb6c8e65b271cbb579a2d8494a58f45d4f972abfaaed3fe2b541c0de1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://25d4f25cee49fab6800b4f76945a24fb119c062748df166cc853aeb391d20e6d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:20Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.164094 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.164136 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.164150 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.164170 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.164181 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.181189 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7bd4578d938456a3ec6d112130299d24601a69769daf26e897de988fc867ddba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.198238 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-5mb6f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6838c72e-3f88-4cb6-91e6-954cf7d9fef3\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6c483170e0f9345697552f48aad1d30b0e5930ff2f6157492cdcaa11b6db6f22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6mhss\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-5mb6f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.217096 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"cb2cca68-6cd3-4ee3-9a3b-b1d22857d2b2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://73e2b22029e2c4f420b57d567dec33662a388db91aec1ced23817851a320198a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://751fadb5409cb440571a1238c5d3b694e273cad753f3aee5f4a9a9cbcd31cd1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:55Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-64zw4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:55Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qfvdc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 
15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.236983 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:40Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.259657 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-94lzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0a5a476f-6c13-4c62-8042-d9b37846aa18\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T15:27:28Z\\\",\\\"message\\\":\\\"2025-11-26T15:26:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf\\\\n2025-11-26T15:26:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e63d18bb-d85e-460c-9387-a740acc42baf to /host/opt/cni/bin/\\\\n2025-11-26T15:26:43Z [verbose] multus-daemon started\\\\n2025-11-26T15:26:43Z [verbose] Readiness Indicator file check\\\\n2025-11-26T15:27:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:27:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-v67mh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-94lzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.267110 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.267167 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.267186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.267213 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.267231 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.283846 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56f2d574-eefa-4be0-bf3f-aff08053f4e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://72f21a1ecf7e79b3573c41fa8e662d0d50339da3c0d476475966cb5ad9c58af0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f478bc30f20f8cf23f2728ef7bc6dee08fd209ebff62a14ab615607bbca6079b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\
\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21732c4370ba4d1bab3e60b617dfb7529144e2b95dc5fc1a5a039968a3acac4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b5fc51ad52b750abfcee903d331c25a3e4ff49f1eccbac7420ea67284154d2bc\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3928c6ca0eede6f1df6c25a89a4dc95b2acdba19e29d09c80fe88d44912bd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:45Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://227bd4e8d60e9737efcedb533a81d27c4541620c55a78e92922ef401d01c6e7a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2ecc650b699b7bb541dc2e15dc9ec4f8dddf423e9e6f0a7fe1ea198db0a36edd\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T15:26:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T15:26:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xtczx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:41Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-sc4tv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.302512 5010 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-ckdwd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4ab940b-709f-4f03-ac81-9d6d57364f48\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T15:26:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebd3fd05299597653eaa14ad5ddcc2fd5b10d7217e9f075f57ab220470e32334\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T15:26:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lmwfp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T15:26:43Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-ckdwd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:50Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.370360 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.370422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.370440 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.370468 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.370486 5010 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.474184 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.474243 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.474264 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.474290 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.474309 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.579387 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.579468 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.579490 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.579517 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.579543 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.682852 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.682922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.682940 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.682965 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.682985 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.786510 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.786570 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.786594 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.786623 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.786644 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.889475 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.889527 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.889544 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.889568 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.889586 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.891353 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:50 crc kubenswrapper[5010]: E1126 15:27:50.891500 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.891607 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:50 crc kubenswrapper[5010]: E1126 15:27:50.891870 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.891955 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:50 crc kubenswrapper[5010]: E1126 15:27:50.892085 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.993363 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.993422 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.993442 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.993470 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:50 crc kubenswrapper[5010]: I1126 15:27:50.993489 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:50Z","lastTransitionTime":"2025-11-26T15:27:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.096584 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.096656 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.096672 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.096699 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.096747 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.200097 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.200190 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.200210 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.200377 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.200410 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.303573 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.303631 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.303651 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.303677 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.303698 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.407125 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.407219 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.407245 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.407280 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.407304 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.510660 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.510744 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.510763 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.510785 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.510802 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.613194 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.613259 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.613276 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.613305 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.613324 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.716982 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.717043 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.717064 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.717089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.717108 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.819608 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.819659 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.819675 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.819699 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.819774 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.891451 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:51 crc kubenswrapper[5010]: E1126 15:27:51.891644 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.921969 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.922035 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.922059 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.922091 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:51 crc kubenswrapper[5010]: I1126 15:27:51.922115 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:51Z","lastTransitionTime":"2025-11-26T15:27:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.025389 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.025455 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.025477 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.025510 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.025534 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.128485 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.128547 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.128564 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.128588 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.128607 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.231036 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.231083 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.231096 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.231114 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.231125 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.333746 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.333829 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.333846 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.333874 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.333896 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.436891 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.436946 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.436956 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.436975 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.436987 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.539107 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.539155 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.539171 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.539191 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.539204 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.641634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.641698 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.641734 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.641756 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.641771 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.748788 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.749420 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.749497 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.749525 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.749543 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.852978 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.853037 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.853062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.853094 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.853118 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.890686 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.890747 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.890680 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:52 crc kubenswrapper[5010]: E1126 15:27:52.890830 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:52 crc kubenswrapper[5010]: E1126 15:27:52.890879 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:52 crc kubenswrapper[5010]: E1126 15:27:52.891000 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.955770 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.955831 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.955856 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.955888 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:52 crc kubenswrapper[5010]: I1126 15:27:52.955911 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:52Z","lastTransitionTime":"2025-11-26T15:27:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.058373 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.058428 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.058446 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.058468 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.058484 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.161456 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.161508 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.161519 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.161537 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.161549 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.264192 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.264229 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.264240 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.264256 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.264265 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.366655 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.366743 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.366797 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.366823 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.366840 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.470237 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.470304 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.470321 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.470344 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.470363 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.573652 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.573747 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.573767 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.573791 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.573809 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.677176 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.677230 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.677246 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.677271 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.677288 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.780534 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.780601 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.780621 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.780648 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.780778 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.883411 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.883476 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.883495 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.883521 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.883539 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.890986 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:53 crc kubenswrapper[5010]: E1126 15:27:53.891181 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.986815 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.986870 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.986881 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.986898 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:53 crc kubenswrapper[5010]: I1126 15:27:53.986911 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:53Z","lastTransitionTime":"2025-11-26T15:27:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.090101 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.090159 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.090183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.090211 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.090235 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.193913 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.193990 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.194016 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.194045 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.194066 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.297565 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.297627 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.297643 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.297669 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.297690 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.400954 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.400995 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.401012 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.401038 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.401055 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.504523 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.504595 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.504611 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.504636 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.504655 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.608135 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.608186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.608196 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.608213 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.608226 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.711098 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.711158 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.711175 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.711199 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.711216 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.815407 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.815484 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.815502 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.815529 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.815550 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.891425 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:54 crc kubenswrapper[5010]: E1126 15:27:54.891630 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.891668 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.891830 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:54 crc kubenswrapper[5010]: E1126 15:27:54.891865 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:54 crc kubenswrapper[5010]: E1126 15:27:54.892174 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.922864 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.922939 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.922961 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.923000 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:54 crc kubenswrapper[5010]: I1126 15:27:54.923023 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:54Z","lastTransitionTime":"2025-11-26T15:27:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.026890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.026959 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.027030 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.027062 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.027080 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.129952 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.130028 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.130090 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.130121 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.130145 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.233293 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.233372 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.233392 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.233418 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.233436 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.337277 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.337690 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.337901 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.338209 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.338374 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.441941 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.442021 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.442047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.442072 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.442092 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.545216 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.545281 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.545298 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.545324 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.545342 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.649190 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.649392 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.649417 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.649443 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.649465 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.752347 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.752387 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.752400 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.752416 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.752432 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.856137 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.856201 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.856219 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.856247 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.856271 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.891226 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:55 crc kubenswrapper[5010]: E1126 15:27:55.891631 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.892793 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:27:55 crc kubenswrapper[5010]: E1126 15:27:55.893091 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.959067 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.959151 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.959165 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.959183 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:55 crc kubenswrapper[5010]: I1126 15:27:55.959568 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:55Z","lastTransitionTime":"2025-11-26T15:27:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.062017 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.062073 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.062086 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.062109 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.062123 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.165287 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.165348 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.165361 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.165382 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.165398 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.268843 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.268895 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.268906 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.268926 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.268938 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.372851 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.372928 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.372948 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.372978 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.373016 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.476586 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.476634 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.476651 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.476675 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.476693 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.579971 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.580378 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.580405 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.580432 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.580450 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.685774 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.685861 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.685887 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.685922 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.685947 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.788355 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.788413 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.788439 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.788470 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.788493 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891302 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891302 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:56 crc kubenswrapper[5010]: E1126 15:27:56.891515 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:56 crc kubenswrapper[5010]: E1126 15:27:56.891633 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891327 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891699 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891790 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891814 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891842 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: E1126 15:27:56.891849 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.891864 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.995239 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.995290 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.995302 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.995322 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:56 crc kubenswrapper[5010]: I1126 15:27:56.995336 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:56Z","lastTransitionTime":"2025-11-26T15:27:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.098802 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.098852 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.098867 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.098890 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.098907 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.202771 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.202846 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.202871 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.202903 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.202923 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.306235 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.306282 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.306297 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.306313 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.306325 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.409297 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.409345 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.409357 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.409374 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.409385 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.513089 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.513162 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.513186 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.513217 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.513239 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.616564 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.616627 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.616645 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.616668 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.616686 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.719636 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.719750 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.719771 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.719798 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.719818 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.822775 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.822856 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.822923 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.822957 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.822980 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.891068 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:57 crc kubenswrapper[5010]: E1126 15:27:57.891281 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.925464 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.925517 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.925532 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.925553 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:57 crc kubenswrapper[5010]: I1126 15:27:57.925570 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:57Z","lastTransitionTime":"2025-11-26T15:27:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.028470 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.028521 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.028532 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.028552 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.028566 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.132227 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.132305 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.132327 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.132353 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.132372 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.235314 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.235390 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.235414 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.235445 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.235472 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.338586 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.338655 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.338674 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.338698 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.338748 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.441858 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.441941 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.441965 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.441999 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.442021 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.544947 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.545005 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.545022 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.545047 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.545066 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.648281 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.648330 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.648347 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.648370 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.648387 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.751388 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.751441 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.751462 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.751489 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.751510 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.854359 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.854418 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.854434 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.854459 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.854479 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.888697 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.888764 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.888780 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.888804 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.888821 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.890606 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.890664 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:27:58 crc kubenswrapper[5010]: E1126 15:27:58.890896 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.890966 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:27:58 crc kubenswrapper[5010]: E1126 15:27:58.890973 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:27:58 crc kubenswrapper[5010]: E1126 15:27:58.891228 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:27:58 crc kubenswrapper[5010]: E1126 15:27:58.909404 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:58Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.915182 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.915248 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.915267 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.915291 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.915312 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: E1126 15:27:58.934220 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:58Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.940157 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.940221 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.940240 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.940265 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.940284 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:58 crc kubenswrapper[5010]: E1126 15:27:58.961339 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T15:27:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"21620236-c00a-4f13-9fac-891f828aea35\\\",\\\"systemUUID\\\":\\\"acbf26fa-40c0-4dfa-8770-e9f2cef78fa9\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T15:27:58Z is after 2025-08-24T17:21:41Z" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.966928 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.967001 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.967032 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.967064 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 15:27:58 crc kubenswrapper[5010]: I1126 15:27:58.967091 5010 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T15:27:58Z","lastTransitionTime":"2025-11-26T15:27:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.041449 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6"] Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.042522 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.047154 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.047184 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.047277 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.047765 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.072379 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fea1796f-dda7-4290-8239-a1d0ef8de716-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.072428 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fea1796f-dda7-4290-8239-a1d0ef8de716-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.072462 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fea1796f-dda7-4290-8239-a1d0ef8de716-service-ca\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.072679 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/fea1796f-dda7-4290-8239-a1d0ef8de716-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.072771 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fea1796f-dda7-4290-8239-a1d0ef8de716-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.103131 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-94lzp" podStartSLOduration=79.103105629 podStartE2EDuration="1m19.103105629s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.101896108 +0000 UTC m=+99.892613276" watchObservedRunningTime="2025-11-26 15:27:59.103105629 +0000 UTC m=+99.893822787" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.156393 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-sc4tv" podStartSLOduration=79.156363358 podStartE2EDuration="1m19.156363358s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.156240995 +0000 UTC m=+99.946958143" watchObservedRunningTime="2025-11-26 15:27:59.156363358 +0000 UTC m=+99.947080526" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.173463 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fea1796f-dda7-4290-8239-a1d0ef8de716-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.173525 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fea1796f-dda7-4290-8239-a1d0ef8de716-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.173557 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fea1796f-dda7-4290-8239-a1d0ef8de716-service-ca\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.173616 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fea1796f-dda7-4290-8239-a1d0ef8de716-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.173653 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fea1796f-dda7-4290-8239-a1d0ef8de716-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.173764 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/fea1796f-dda7-4290-8239-a1d0ef8de716-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.174375 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/fea1796f-dda7-4290-8239-a1d0ef8de716-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.174772 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fea1796f-dda7-4290-8239-a1d0ef8de716-service-ca\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.177913 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-ckdwd" podStartSLOduration=80.177892261 podStartE2EDuration="1m20.177892261s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.177439729 +0000 UTC m=+99.968156887" watchObservedRunningTime="2025-11-26 15:27:59.177892261 +0000 UTC m=+99.968609419" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.180690 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fea1796f-dda7-4290-8239-a1d0ef8de716-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.192678 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/fea1796f-dda7-4290-8239-a1d0ef8de716-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-t5zp6\" (UID: \"fea1796f-dda7-4290-8239-a1d0ef8de716\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.262444 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=79.262417402 podStartE2EDuration="1m19.262417402s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.261219182 +0000 UTC m=+100.051936330" watchObservedRunningTime="2025-11-26 15:27:59.262417402 +0000 UTC m=+100.053134590" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.289836 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=45.289811466 podStartE2EDuration="45.289811466s" podCreationTimestamp="2025-11-26 15:27:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.2895741 +0000 UTC m=+100.080291288" watchObservedRunningTime="2025-11-26 15:27:59.289811466 +0000 UTC m=+100.080528624" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.359107 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podStartSLOduration=79.359082886 podStartE2EDuration="1m19.359082886s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.331526848 +0000 UTC m=+100.122244016" watchObservedRunningTime="2025-11-26 15:27:59.359082886 +0000 UTC m=+100.149800034" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.367668 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.397799 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=26.39777898 podStartE2EDuration="26.39777898s" podCreationTimestamp="2025-11-26 15:27:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.373298741 +0000 UTC m=+100.164015899" watchObservedRunningTime="2025-11-26 15:27:59.39777898 +0000 UTC m=+100.188496128" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.413369 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=79.41335343 podStartE2EDuration="1m19.41335343s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.397614116 +0000 UTC m=+100.188331264" watchObservedRunningTime="2025-11-26 15:27:59.41335343 +0000 UTC m=+100.204070578" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.433592 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=75.43356623 podStartE2EDuration="1m15.43356623s" podCreationTimestamp="2025-11-26 15:26:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.415116596 +0000 UTC m=+100.205833774" watchObservedRunningTime="2025-11-26 15:27:59.43356623 +0000 UTC m=+100.224283378" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.459549 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qfvdc" podStartSLOduration=79.459531027 podStartE2EDuration="1m19.459531027s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.459090066 +0000 UTC m=+100.249807214" watchObservedRunningTime="2025-11-26 15:27:59.459531027 +0000 UTC m=+100.250248185" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.459885 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-5mb6f" podStartSLOduration=80.459877946 podStartE2EDuration="1m20.459877946s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.446162213 +0000 UTC m=+100.236879361" watchObservedRunningTime="2025-11-26 15:27:59.459877946 +0000 UTC m=+100.250595084" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.532361 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" event={"ID":"fea1796f-dda7-4290-8239-a1d0ef8de716","Type":"ContainerStarted","Data":"d416fb5feccddf31b150a58bff1b12458951bf330a76607f7412f8834dc01612"} Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.532407 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" event={"ID":"fea1796f-dda7-4290-8239-a1d0ef8de716","Type":"ContainerStarted","Data":"cf2839f8326c0c816bfceea1ee169ed585c8cfd2a8a91fe636adebbeb86b9bef"} Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.545123 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-t5zp6" podStartSLOduration=79.545100025 podStartE2EDuration="1m19.545100025s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:27:59.544089689 +0000 UTC m=+100.334806837" watchObservedRunningTime="2025-11-26 15:27:59.545100025 +0000 UTC m=+100.335817213" Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.777985 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:59 crc kubenswrapper[5010]: E1126 15:27:59.778130 5010 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:59 crc kubenswrapper[5010]: E1126 15:27:59.778187 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs podName:fd9f5a65-e633-439f-8e8d-b760d20a3223 nodeName:}" failed. No retries permitted until 2025-11-26 15:29:03.778170194 +0000 UTC m=+164.568887352 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs") pod "network-metrics-daemon-df2ll" (UID: "fd9f5a65-e633-439f-8e8d-b760d20a3223") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 15:27:59 crc kubenswrapper[5010]: I1126 15:27:59.890742 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:27:59 crc kubenswrapper[5010]: E1126 15:27:59.892491 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:00 crc kubenswrapper[5010]: I1126 15:28:00.891169 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:00 crc kubenswrapper[5010]: I1126 15:28:00.891176 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:00 crc kubenswrapper[5010]: E1126 15:28:00.891368 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:00 crc kubenswrapper[5010]: E1126 15:28:00.891464 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:00 crc kubenswrapper[5010]: I1126 15:28:00.891203 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:00 crc kubenswrapper[5010]: E1126 15:28:00.891747 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:01 crc kubenswrapper[5010]: I1126 15:28:01.891750 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:01 crc kubenswrapper[5010]: E1126 15:28:01.892496 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:02 crc kubenswrapper[5010]: I1126 15:28:02.891388 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:02 crc kubenswrapper[5010]: E1126 15:28:02.891942 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:02 crc kubenswrapper[5010]: I1126 15:28:02.891509 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:02 crc kubenswrapper[5010]: E1126 15:28:02.892945 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:02 crc kubenswrapper[5010]: I1126 15:28:02.891430 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:02 crc kubenswrapper[5010]: E1126 15:28:02.893329 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:03 crc kubenswrapper[5010]: I1126 15:28:03.891285 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:03 crc kubenswrapper[5010]: E1126 15:28:03.891438 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:04 crc kubenswrapper[5010]: I1126 15:28:04.891358 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:04 crc kubenswrapper[5010]: E1126 15:28:04.891510 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:04 crc kubenswrapper[5010]: I1126 15:28:04.891851 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:04 crc kubenswrapper[5010]: E1126 15:28:04.891973 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:04 crc kubenswrapper[5010]: I1126 15:28:04.892172 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:04 crc kubenswrapper[5010]: E1126 15:28:04.892258 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:05 crc kubenswrapper[5010]: I1126 15:28:05.891512 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:05 crc kubenswrapper[5010]: E1126 15:28:05.891783 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:06 crc kubenswrapper[5010]: I1126 15:28:06.891389 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:06 crc kubenswrapper[5010]: I1126 15:28:06.892097 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:06 crc kubenswrapper[5010]: E1126 15:28:06.892360 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:06 crc kubenswrapper[5010]: E1126 15:28:06.892542 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:06 crc kubenswrapper[5010]: I1126 15:28:06.892604 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:28:06 crc kubenswrapper[5010]: E1126 15:28:06.892973 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:28:06 crc kubenswrapper[5010]: I1126 15:28:06.893382 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:06 crc kubenswrapper[5010]: E1126 15:28:06.893669 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:07 crc kubenswrapper[5010]: I1126 15:28:07.891261 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:07 crc kubenswrapper[5010]: E1126 15:28:07.891464 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:08 crc kubenswrapper[5010]: I1126 15:28:08.890533 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:08 crc kubenswrapper[5010]: I1126 15:28:08.890551 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:08 crc kubenswrapper[5010]: I1126 15:28:08.890553 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:08 crc kubenswrapper[5010]: E1126 15:28:08.890951 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:08 crc kubenswrapper[5010]: E1126 15:28:08.891012 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:08 crc kubenswrapper[5010]: E1126 15:28:08.890766 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:09 crc kubenswrapper[5010]: I1126 15:28:09.891221 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:09 crc kubenswrapper[5010]: E1126 15:28:09.893847 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:10 crc kubenswrapper[5010]: I1126 15:28:10.891602 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:10 crc kubenswrapper[5010]: I1126 15:28:10.891654 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:10 crc kubenswrapper[5010]: E1126 15:28:10.891816 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:10 crc kubenswrapper[5010]: I1126 15:28:10.891631 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:10 crc kubenswrapper[5010]: E1126 15:28:10.892223 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:10 crc kubenswrapper[5010]: E1126 15:28:10.892310 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:11 crc kubenswrapper[5010]: I1126 15:28:11.891770 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:11 crc kubenswrapper[5010]: E1126 15:28:11.891972 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:12 crc kubenswrapper[5010]: I1126 15:28:12.890869 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:12 crc kubenswrapper[5010]: I1126 15:28:12.890928 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:12 crc kubenswrapper[5010]: E1126 15:28:12.891614 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:12 crc kubenswrapper[5010]: E1126 15:28:12.891694 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:12 crc kubenswrapper[5010]: I1126 15:28:12.890957 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:12 crc kubenswrapper[5010]: E1126 15:28:12.891838 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:13 crc kubenswrapper[5010]: I1126 15:28:13.891283 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:13 crc kubenswrapper[5010]: E1126 15:28:13.891503 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:14 crc kubenswrapper[5010]: I1126 15:28:14.891007 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:14 crc kubenswrapper[5010]: I1126 15:28:14.891104 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:14 crc kubenswrapper[5010]: I1126 15:28:14.891163 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:14 crc kubenswrapper[5010]: E1126 15:28:14.891211 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:14 crc kubenswrapper[5010]: E1126 15:28:14.891417 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:14 crc kubenswrapper[5010]: E1126 15:28:14.891592 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.600110 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/1.log" Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.600817 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/0.log" Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.600865 5010 generic.go:334] "Generic (PLEG): container finished" podID="0a5a476f-6c13-4c62-8042-d9b37846aa18" containerID="fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a" exitCode=1 Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.600897 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerDied","Data":"fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a"} Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.600944 5010 scope.go:117] "RemoveContainer" containerID="263c58f6732a0dd15b348e4d4ec0167247cd1cd685ae21533cd54f842fcd99ad" Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.601505 5010 scope.go:117] "RemoveContainer" containerID="fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a" Nov 26 15:28:15 crc kubenswrapper[5010]: E1126 15:28:15.601785 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-94lzp_openshift-multus(0a5a476f-6c13-4c62-8042-d9b37846aa18)\"" pod="openshift-multus/multus-94lzp" podUID="0a5a476f-6c13-4c62-8042-d9b37846aa18" Nov 26 15:28:15 crc kubenswrapper[5010]: I1126 15:28:15.890788 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:15 crc kubenswrapper[5010]: E1126 15:28:15.891351 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:16 crc kubenswrapper[5010]: I1126 15:28:16.606410 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/1.log" Nov 26 15:28:16 crc kubenswrapper[5010]: I1126 15:28:16.891184 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:16 crc kubenswrapper[5010]: I1126 15:28:16.891308 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:16 crc kubenswrapper[5010]: E1126 15:28:16.891408 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:16 crc kubenswrapper[5010]: E1126 15:28:16.891501 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:16 crc kubenswrapper[5010]: I1126 15:28:16.891551 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:16 crc kubenswrapper[5010]: E1126 15:28:16.891639 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:17 crc kubenswrapper[5010]: I1126 15:28:17.891140 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:17 crc kubenswrapper[5010]: E1126 15:28:17.891394 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:18 crc kubenswrapper[5010]: I1126 15:28:18.891444 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:18 crc kubenswrapper[5010]: I1126 15:28:18.891545 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:18 crc kubenswrapper[5010]: I1126 15:28:18.891476 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:18 crc kubenswrapper[5010]: E1126 15:28:18.891760 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:18 crc kubenswrapper[5010]: E1126 15:28:18.891875 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:18 crc kubenswrapper[5010]: E1126 15:28:18.892009 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:19 crc kubenswrapper[5010]: E1126 15:28:19.870169 5010 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 26 15:28:19 crc kubenswrapper[5010]: I1126 15:28:19.890590 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:19 crc kubenswrapper[5010]: E1126 15:28:19.892628 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:19 crc kubenswrapper[5010]: I1126 15:28:19.893805 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:28:19 crc kubenswrapper[5010]: E1126 15:28:19.894104 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hlqt9_openshift-ovn-kubernetes(f10d9600-fac2-43e9-ad75-91b3c1f5b749)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" Nov 26 15:28:19 crc kubenswrapper[5010]: E1126 15:28:19.997921 5010 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:28:20 crc kubenswrapper[5010]: I1126 15:28:20.890938 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:20 crc kubenswrapper[5010]: I1126 15:28:20.890963 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:20 crc kubenswrapper[5010]: E1126 15:28:20.891158 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:20 crc kubenswrapper[5010]: I1126 15:28:20.891438 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:20 crc kubenswrapper[5010]: E1126 15:28:20.891540 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:20 crc kubenswrapper[5010]: E1126 15:28:20.891794 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:21 crc kubenswrapper[5010]: I1126 15:28:21.891205 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:21 crc kubenswrapper[5010]: E1126 15:28:21.891491 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:22 crc kubenswrapper[5010]: I1126 15:28:22.891702 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:22 crc kubenswrapper[5010]: I1126 15:28:22.891770 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:22 crc kubenswrapper[5010]: E1126 15:28:22.891890 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:22 crc kubenswrapper[5010]: I1126 15:28:22.891913 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:22 crc kubenswrapper[5010]: E1126 15:28:22.892268 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:22 crc kubenswrapper[5010]: E1126 15:28:22.892372 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:23 crc kubenswrapper[5010]: I1126 15:28:23.891584 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:23 crc kubenswrapper[5010]: E1126 15:28:23.891839 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:24 crc kubenswrapper[5010]: I1126 15:28:24.891126 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:24 crc kubenswrapper[5010]: I1126 15:28:24.891142 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:24 crc kubenswrapper[5010]: E1126 15:28:24.891341 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:24 crc kubenswrapper[5010]: E1126 15:28:24.891534 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:24 crc kubenswrapper[5010]: I1126 15:28:24.891142 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:24 crc kubenswrapper[5010]: E1126 15:28:24.891736 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:24 crc kubenswrapper[5010]: E1126 15:28:24.999213 5010 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:28:25 crc kubenswrapper[5010]: I1126 15:28:25.891148 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:25 crc kubenswrapper[5010]: E1126 15:28:25.891539 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:26 crc kubenswrapper[5010]: I1126 15:28:26.890814 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:26 crc kubenswrapper[5010]: I1126 15:28:26.890895 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:26 crc kubenswrapper[5010]: I1126 15:28:26.891023 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:26 crc kubenswrapper[5010]: E1126 15:28:26.891463 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:26 crc kubenswrapper[5010]: E1126 15:28:26.891607 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:26 crc kubenswrapper[5010]: I1126 15:28:26.891743 5010 scope.go:117] "RemoveContainer" containerID="fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a" Nov 26 15:28:26 crc kubenswrapper[5010]: E1126 15:28:26.892895 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:27 crc kubenswrapper[5010]: I1126 15:28:27.649230 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/1.log" Nov 26 15:28:27 crc kubenswrapper[5010]: I1126 15:28:27.649818 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerStarted","Data":"5358d44abff63ec38fcdcf83ef302371855bdc2ea7e63d36b38665e5a8434fdb"} Nov 26 15:28:27 crc kubenswrapper[5010]: I1126 15:28:27.892222 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:27 crc kubenswrapper[5010]: E1126 15:28:27.892452 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:28 crc kubenswrapper[5010]: I1126 15:28:28.891448 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:28 crc kubenswrapper[5010]: I1126 15:28:28.891578 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:28 crc kubenswrapper[5010]: I1126 15:28:28.891670 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:28 crc kubenswrapper[5010]: E1126 15:28:28.891671 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:28 crc kubenswrapper[5010]: E1126 15:28:28.891875 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:28 crc kubenswrapper[5010]: E1126 15:28:28.892000 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:29 crc kubenswrapper[5010]: I1126 15:28:29.890591 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:29 crc kubenswrapper[5010]: E1126 15:28:29.892313 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:30 crc kubenswrapper[5010]: E1126 15:28:30.000124 5010 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:28:30 crc kubenswrapper[5010]: I1126 15:28:30.891474 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:30 crc kubenswrapper[5010]: I1126 15:28:30.891545 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:30 crc kubenswrapper[5010]: I1126 15:28:30.891657 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:30 crc kubenswrapper[5010]: E1126 15:28:30.891785 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:30 crc kubenswrapper[5010]: E1126 15:28:30.891924 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:30 crc kubenswrapper[5010]: E1126 15:28:30.892063 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:31 crc kubenswrapper[5010]: I1126 15:28:31.891210 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:31 crc kubenswrapper[5010]: E1126 15:28:31.891411 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:31 crc kubenswrapper[5010]: I1126 15:28:31.893156 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.675445 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/3.log" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.678166 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerStarted","Data":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.679272 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.891461 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.891569 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.891602 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:32 crc kubenswrapper[5010]: E1126 15:28:32.891692 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:32 crc kubenswrapper[5010]: E1126 15:28:32.891864 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:32 crc kubenswrapper[5010]: E1126 15:28:32.892175 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.899644 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podStartSLOduration=112.899614013 podStartE2EDuration="1m52.899614013s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:32.715085488 +0000 UTC m=+133.505802726" watchObservedRunningTime="2025-11-26 15:28:32.899614013 +0000 UTC m=+133.690331191" Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.901419 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-df2ll"] Nov 26 15:28:32 crc kubenswrapper[5010]: I1126 15:28:32.901574 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:32 crc kubenswrapper[5010]: E1126 15:28:32.901763 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:34 crc kubenswrapper[5010]: I1126 15:28:34.891204 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:34 crc kubenswrapper[5010]: I1126 15:28:34.891316 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:34 crc kubenswrapper[5010]: E1126 15:28:34.892034 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:34 crc kubenswrapper[5010]: I1126 15:28:34.891551 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:34 crc kubenswrapper[5010]: I1126 15:28:34.891437 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:34 crc kubenswrapper[5010]: E1126 15:28:34.892213 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:34 crc kubenswrapper[5010]: E1126 15:28:34.892437 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:34 crc kubenswrapper[5010]: E1126 15:28:34.892580 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:35 crc kubenswrapper[5010]: E1126 15:28:35.001937 5010 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:28:36 crc kubenswrapper[5010]: I1126 15:28:36.890970 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:36 crc kubenswrapper[5010]: I1126 15:28:36.891090 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:36 crc kubenswrapper[5010]: I1126 15:28:36.890970 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:36 crc kubenswrapper[5010]: E1126 15:28:36.891176 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:36 crc kubenswrapper[5010]: I1126 15:28:36.891095 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:36 crc kubenswrapper[5010]: E1126 15:28:36.891289 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:36 crc kubenswrapper[5010]: E1126 15:28:36.891412 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:36 crc kubenswrapper[5010]: E1126 15:28:36.891546 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:38 crc kubenswrapper[5010]: I1126 15:28:38.891605 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:38 crc kubenswrapper[5010]: I1126 15:28:38.891791 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:38 crc kubenswrapper[5010]: I1126 15:28:38.891791 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:38 crc kubenswrapper[5010]: I1126 15:28:38.891969 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:38 crc kubenswrapper[5010]: E1126 15:28:38.891964 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-df2ll" podUID="fd9f5a65-e633-439f-8e8d-b760d20a3223" Nov 26 15:28:38 crc kubenswrapper[5010]: E1126 15:28:38.892161 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 15:28:38 crc kubenswrapper[5010]: E1126 15:28:38.892284 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 15:28:38 crc kubenswrapper[5010]: E1126 15:28:38.892380 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.004497 5010 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.064803 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-djqn5"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.065438 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.066455 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-6pfg9"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.066944 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.071110 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.071994 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.072908 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-f6hqp"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.073618 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.088175 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.088515 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.089462 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.090347 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.090914 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.090979 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.090918 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.091777 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.092246 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.093018 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9xndz"] Nov 26 15:28:40 
crc kubenswrapper[5010]: I1126 15:28:40.093599 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.094007 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.094529 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.094835 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.095040 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.095292 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.095545 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.096155 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.096477 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.096510 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.094837 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.097133 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.103430 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.106272 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.108899 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zbm5j"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.109505 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.111613 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-rh2vd"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.111975 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.112830 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-mwpwb"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.113136 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.115252 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.116091 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.116866 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.117474 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.118752 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-p6kxm"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.119106 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.119798 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.120209 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.121225 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.122102 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.123906 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.124674 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.125951 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.126185 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.126466 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.126576 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.126969 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.127170 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7hblv"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.127253 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.127513 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.127757 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.127774 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.133540 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.140083 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-xzsf5"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.164728 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.166176 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.175926 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.176501 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.176577 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.176783 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177088 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177138 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177278 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177362 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177442 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177512 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177527 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177559 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177607 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177623 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177632 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177728 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177769 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177776 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177869 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177925 5010 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177952 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178010 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178022 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178092 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178131 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178173 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178237 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178255 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177561 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178293 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177730 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178338 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177097 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178392 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178027 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178432 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178485 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178531 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178605 5010 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178647 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178538 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178257 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.179497 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.180096 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.181467 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.181615 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.181780 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.181806 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.181934 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.182076 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.182199 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.177530 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.178434 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.186000 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fgc69"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.186288 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.186668 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-ptfqn"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.186745 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.188603 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.188933 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.191204 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.197303 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.197570 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.197950 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198108 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198214 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198249 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198112 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198489 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198754 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198875 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.198978 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.199205 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.199251 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.199408 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.199588 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.202131 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.203831 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.213617 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.227545 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.227640 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.228271 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.229381 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.238797 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.239760 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.239872 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.239946 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.240589 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-b74pk"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.240802 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.242979 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.243565 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mr9qp"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.244024 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.243996 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.244148 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.244974 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.245193 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.245483 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.247834 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.248389 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gw7ld"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.249569 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.250472 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.251228 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.253280 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-djqn5"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.255236 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.256027 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.257232 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.258001 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.259234 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.259691 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.260776 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.261178 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.262834 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-q284v"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.264195 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.264521 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-x97hz"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.265220 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.267210 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.267471 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.268057 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.268447 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.268854 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.270271 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9xndz"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.271865 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-f6hqp"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.273346 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-6pfg9"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.275815 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276252 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4e14de8c-da99-4612-92ec-50f74d50c547-metrics-tls\") pod \"dns-operator-744455d44c-xzsf5\" (UID: \"4e14de8c-da99-4612-92ec-50f74d50c547\") " pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276281 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9r9t\" (UniqueName: \"kubernetes.io/projected/a544cc25-5303-452d-bbd3-5ac22b642ad7-kube-api-access-x9r9t\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276305 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lm8gv\" (UniqueName: \"kubernetes.io/projected/f0d44623-c021-45d4-bc90-b40247ec17ef-kube-api-access-lm8gv\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276335 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-service-ca\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276352 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-encryption-config\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276371 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-serving-cert\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276388 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-tg6gb\" (UniqueName: \"kubernetes.io/projected/718d3669-c82b-4c98-aff8-ea8862a17dca-kube-api-access-tg6gb\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276405 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-audit-policies\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276439 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a544cc25-5303-452d-bbd3-5ac22b642ad7-serving-cert\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276456 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276475 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ed362947-89a1-4af0-843c-fde4fd5b61ec-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-l7l7k\" (UID: \"ed362947-89a1-4af0-843c-fde4fd5b61ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276491 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhcb4\" (UniqueName: \"kubernetes.io/projected/fe4b6236-b05e-415e-ae6b-3404c7562f99-kube-api-access-zhcb4\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276507 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-oauth-serving-cert\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276525 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64xdn\" (UniqueName: \"kubernetes.io/projected/72d7809b-b34d-4536-b98d-44cd347e4b67-kube-api-access-64xdn\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276540 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe4b6236-b05e-415e-ae6b-3404c7562f99-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276558 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-oauth-config\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276575 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/718d3669-c82b-4c98-aff8-ea8862a17dca-machine-approver-tls\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276603 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-config\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276621 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276637 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276652 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276667 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276685 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-service-ca-bundle\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276703 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/718d3669-c82b-4c98-aff8-ea8862a17dca-config\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276742 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm8tl\" (UniqueName: \"kubernetes.io/projected/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-kube-api-access-wm8tl\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276759 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-client-ca\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276774 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276789 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/72d7809b-b34d-4536-b98d-44cd347e4b67-trusted-ca\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276804 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csm9n\" (UniqueName: \"kubernetes.io/projected/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-kube-api-access-csm9n\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276854 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/718d3669-c82b-4c98-aff8-ea8862a17dca-auth-proxy-config\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 
26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276872 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5bs9\" (UniqueName: \"kubernetes.io/projected/4e14de8c-da99-4612-92ec-50f74d50c547-kube-api-access-j5bs9\") pod \"dns-operator-744455d44c-xzsf5\" (UID: \"4e14de8c-da99-4612-92ec-50f74d50c547\") " pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276887 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a33ae656-009d-4adb-80ef-143cb00bba21-serving-cert\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276904 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72d7809b-b34d-4536-b98d-44cd347e4b67-config\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276921 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276936 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/80e56b90-699c-4fcd-b69a-748b192fce11-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276953 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqp46\" (UniqueName: \"kubernetes.io/projected/8c454fe1-8825-4c5f-a145-727f16df4b00-kube-api-access-fqp46\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276969 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-etcd-client\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.276989 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 
26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277005 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-console-config\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277020 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-trusted-ca-bundle\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277038 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/80e56b90-699c-4fcd-b69a-748b192fce11-images\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277052 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe4b6236-b05e-415e-ae6b-3404c7562f99-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277067 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c454fe1-8825-4c5f-a145-727f16df4b00-serving-cert\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277082 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz4cj\" (UniqueName: \"kubernetes.io/projected/22abed70-9135-4e67-a009-b013ada1f720-kube-api-access-lz4cj\") pod \"downloads-7954f5f757-p6kxm\" (UID: \"22abed70-9135-4e67-a009-b013ada1f720\") " pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277100 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-config\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277117 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a0e77df-c904-48f2-a303-0c024c1fd066-audit-dir\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277133 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwmm4\" (UniqueName: \"kubernetes.io/projected/a33ae656-009d-4adb-80ef-143cb00bba21-kube-api-access-lwmm4\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277149 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80e56b90-699c-4fcd-b69a-748b192fce11-config\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277164 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8c454fe1-8825-4c5f-a145-727f16df4b00-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277181 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l8xdv\" (UniqueName: \"kubernetes.io/projected/ed362947-89a1-4af0-843c-fde4fd5b61ec-kube-api-access-l8xdv\") pod \"cluster-samples-operator-665b6dd947-l7l7k\" (UID: \"ed362947-89a1-4af0-843c-fde4fd5b61ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277209 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztbxj\" (UniqueName: \"kubernetes.io/projected/80e56b90-699c-4fcd-b69a-748b192fce11-kube-api-access-ztbxj\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277227 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-serving-cert\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277243 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4th5\" (UniqueName: \"kubernetes.io/projected/0a0e77df-c904-48f2-a303-0c024c1fd066-kube-api-access-n4th5\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277260 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72d7809b-b34d-4536-b98d-44cd347e4b67-serving-cert\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.277612 5010 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-rh2vd"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.279426 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.280676 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-mwpwb"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.282964 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.282992 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.284148 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.285106 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zbm5j"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.286657 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-xzsf5"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.287645 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.287949 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.289968 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.292173 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fgc69"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.292237 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-p6kxm"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.294085 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.295287 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-x4zhd"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.296391 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.296968 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mr9qp"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.298141 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.300654 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.301908 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.303224 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.307989 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.316242 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.318881 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.320743 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.322140 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7hblv"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.323389 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-tbs79"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.324809 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-rjtpj"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.325041 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.325341 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-b74pk"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.325426 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.327764 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gw7ld"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.328020 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.328900 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.329995 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.331131 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-x97hz"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.332212 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.333214 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-x4zhd"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.334592 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.335689 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.336721 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-q284v"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.337824 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-tbs79"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.338858 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-wjjlq"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.339701 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.339947 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-wjjlq"] Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.347752 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.368384 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378524 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l8xdv\" (UniqueName: \"kubernetes.io/projected/ed362947-89a1-4af0-843c-fde4fd5b61ec-kube-api-access-l8xdv\") pod \"cluster-samples-operator-665b6dd947-l7l7k\" (UID: \"ed362947-89a1-4af0-843c-fde4fd5b61ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378563 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztbxj\" (UniqueName: \"kubernetes.io/projected/80e56b90-699c-4fcd-b69a-748b192fce11-kube-api-access-ztbxj\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378590 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72d7809b-b34d-4536-b98d-44cd347e4b67-serving-cert\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378614 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-serving-cert\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378637 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4th5\" (UniqueName: \"kubernetes.io/projected/0a0e77df-c904-48f2-a303-0c024c1fd066-kube-api-access-n4th5\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378657 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4e14de8c-da99-4612-92ec-50f74d50c547-metrics-tls\") pod \"dns-operator-744455d44c-xzsf5\" (UID: \"4e14de8c-da99-4612-92ec-50f74d50c547\") " pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378674 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9r9t\" (UniqueName: \"kubernetes.io/projected/a544cc25-5303-452d-bbd3-5ac22b642ad7-kube-api-access-x9r9t\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" 
Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378698 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lm8gv\" (UniqueName: \"kubernetes.io/projected/f0d44623-c021-45d4-bc90-b40247ec17ef-kube-api-access-lm8gv\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378764 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-service-ca\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378787 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-encryption-config\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378810 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-serving-cert\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378830 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-audit-policies\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378851 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tg6gb\" (UniqueName: \"kubernetes.io/projected/718d3669-c82b-4c98-aff8-ea8862a17dca-kube-api-access-tg6gb\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378876 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a544cc25-5303-452d-bbd3-5ac22b642ad7-serving-cert\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378897 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378918 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/ed362947-89a1-4af0-843c-fde4fd5b61ec-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-l7l7k\" (UID: \"ed362947-89a1-4af0-843c-fde4fd5b61ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378941 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-oauth-serving-cert\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378966 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhcb4\" (UniqueName: \"kubernetes.io/projected/fe4b6236-b05e-415e-ae6b-3404c7562f99-kube-api-access-zhcb4\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.378992 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64xdn\" (UniqueName: \"kubernetes.io/projected/72d7809b-b34d-4536-b98d-44cd347e4b67-kube-api-access-64xdn\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379019 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-oauth-config\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379044 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe4b6236-b05e-415e-ae6b-3404c7562f99-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/718d3669-c82b-4c98-aff8-ea8862a17dca-machine-approver-tls\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379111 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379136 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-config\") pod 
\"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379160 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-service-ca-bundle\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379186 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379213 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379237 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379285 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm8tl\" (UniqueName: \"kubernetes.io/projected/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-kube-api-access-wm8tl\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379309 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-client-ca\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379331 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379355 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/718d3669-c82b-4c98-aff8-ea8862a17dca-config\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379390 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/72d7809b-b34d-4536-b98d-44cd347e4b67-trusted-ca\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379418 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csm9n\" (UniqueName: \"kubernetes.io/projected/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-kube-api-access-csm9n\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379448 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/718d3669-c82b-4c98-aff8-ea8862a17dca-auth-proxy-config\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379486 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5bs9\" (UniqueName: \"kubernetes.io/projected/4e14de8c-da99-4612-92ec-50f74d50c547-kube-api-access-j5bs9\") pod \"dns-operator-744455d44c-xzsf5\" (UID: \"4e14de8c-da99-4612-92ec-50f74d50c547\") " pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379511 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a33ae656-009d-4adb-80ef-143cb00bba21-serving-cert\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379535 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72d7809b-b34d-4536-b98d-44cd347e4b67-config\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379558 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379585 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/80e56b90-699c-4fcd-b69a-748b192fce11-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379612 
5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379639 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqp46\" (UniqueName: \"kubernetes.io/projected/8c454fe1-8825-4c5f-a145-727f16df4b00-kube-api-access-fqp46\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379665 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-etcd-client\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379688 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-console-config\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379731 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-trusted-ca-bundle\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379755 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/80e56b90-699c-4fcd-b69a-748b192fce11-images\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379777 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c454fe1-8825-4c5f-a145-727f16df4b00-serving-cert\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379800 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe4b6236-b05e-415e-ae6b-3404c7562f99-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz4cj\" (UniqueName: \"kubernetes.io/projected/22abed70-9135-4e67-a009-b013ada1f720-kube-api-access-lz4cj\") 
pod \"downloads-7954f5f757-p6kxm\" (UID: \"22abed70-9135-4e67-a009-b013ada1f720\") " pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379847 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-config\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379869 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a0e77df-c904-48f2-a303-0c024c1fd066-audit-dir\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379892 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80e56b90-699c-4fcd-b69a-748b192fce11-config\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379917 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8c454fe1-8825-4c5f-a145-727f16df4b00-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.379941 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwmm4\" (UniqueName: \"kubernetes.io/projected/a33ae656-009d-4adb-80ef-143cb00bba21-kube-api-access-lwmm4\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.380194 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-service-ca\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.380337 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-config\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.380388 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-client-ca\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.381122 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.381281 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/718d3669-c82b-4c98-aff8-ea8862a17dca-config\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.381651 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe4b6236-b05e-415e-ae6b-3404c7562f99-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.381675 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.382379 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a0e77df-c904-48f2-a303-0c024c1fd066-audit-policies\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.383328 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-oauth-serving-cert\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.383988 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72d7809b-b34d-4536-b98d-44cd347e4b67-config\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.384030 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/72d7809b-b34d-4536-b98d-44cd347e4b67-trusted-ca\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.384397 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4e14de8c-da99-4612-92ec-50f74d50c547-metrics-tls\") pod \"dns-operator-744455d44c-xzsf5\" (UID: \"4e14de8c-da99-4612-92ec-50f74d50c547\") " pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:40 crc 
kubenswrapper[5010]: I1126 15:28:40.384687 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.384797 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/718d3669-c82b-4c98-aff8-ea8862a17dca-auth-proxy-config\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.384880 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-encryption-config\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.384885 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a0e77df-c904-48f2-a303-0c024c1fd066-audit-dir\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.384944 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/80e56b90-699c-4fcd-b69a-748b192fce11-images\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385102 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385321 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-console-config\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385620 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-config\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385627 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385748 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80e56b90-699c-4fcd-b69a-748b192fce11-config\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385899 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/ed362947-89a1-4af0-843c-fde4fd5b61ec-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-l7l7k\" (UID: \"ed362947-89a1-4af0-843c-fde4fd5b61ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.385985 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/8c454fe1-8825-4c5f-a145-727f16df4b00-available-featuregates\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.386084 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-trusted-ca-bundle\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.386259 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a544cc25-5303-452d-bbd3-5ac22b642ad7-service-ca-bundle\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.386855 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-config\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388061 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-etcd-client\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-oauth-config\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388104 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8c454fe1-8825-4c5f-a145-727f16df4b00-serving-cert\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388119 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a544cc25-5303-452d-bbd3-5ac22b642ad7-serving-cert\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388240 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-serving-cert\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388237 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72d7809b-b34d-4536-b98d-44cd347e4b67-serving-cert\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388266 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388444 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe4b6236-b05e-415e-ae6b-3404c7562f99-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388466 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/718d3669-c82b-4c98-aff8-ea8862a17dca-machine-approver-tls\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.388616 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0a0e77df-c904-48f2-a303-0c024c1fd066-serving-cert\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.389131 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.389431 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a33ae656-009d-4adb-80ef-143cb00bba21-serving-cert\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.390824 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/80e56b90-699c-4fcd-b69a-748b192fce11-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.407875 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.428330 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.448025 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.468953 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.488187 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.507550 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.527919 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.547433 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.568142 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.588010 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.608201 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.628180 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.648217 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.669183 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 15:28:40 crc 
kubenswrapper[5010]: I1126 15:28:40.728948 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.749295 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.782578 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785037 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-serving-cert\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785102 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-audit\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785156 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c18f86a9-0cef-41d3-a371-dfcbb46f837f-audit-dir\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785189 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-config\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785240 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9986e410-984a-466f-bb26-b1644bc6c976-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785272 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-client-ca\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785453 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9bpr\" (UniqueName: \"kubernetes.io/projected/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-kube-api-access-h9bpr\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785589 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9986e410-984a-466f-bb26-b1644bc6c976-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-bound-sa-token\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785697 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-etcd-client\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785779 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-registry-tls\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c18f86a9-0cef-41d3-a371-dfcbb46f837f-node-pullsecrets\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.785930 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786125 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-image-import-ca\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786241 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-config\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786274 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-encryption-config\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786343 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786389 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-etcd-serving-ca\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786471 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-registry-certificates\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786502 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786585 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-trusted-ca\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786628 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-serving-cert\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786681 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79qbg\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-kube-api-access-79qbg\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.786733 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f99t2\" (UniqueName: \"kubernetes.io/projected/c18f86a9-0cef-41d3-a371-dfcbb46f837f-kube-api-access-f99t2\") pod \"apiserver-76f77b778f-djqn5\" (UID: 
\"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: E1126 15:28:40.787296 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.287264733 +0000 UTC m=+142.077981911 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.788376 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.809213 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.830794 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.859416 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.872287 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888200 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:40 crc kubenswrapper[5010]: E1126 15:28:40.888370 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.388329389 +0000 UTC m=+142.179046567 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888471 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-serving-cert\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888523 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-service-ca\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888561 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-mountpoint-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888595 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888650 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-audit\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888690 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwh9t\" (UniqueName: \"kubernetes.io/projected/7d5bba06-909e-4d1c-b379-53015d91e3fd-kube-api-access-qwh9t\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888756 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adb7407f-43e9-4089-8e7d-64a390f510af-proxy-tls\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888796 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce923c4d-997e-4746-bad1-1f611c9a67d6-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888833 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp448\" (UniqueName: \"kubernetes.io/projected/5a3ecd68-6560-460d-b6ac-53209faecbd8-kube-api-access-vp448\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888899 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gf54w\" (UniqueName: \"kubernetes.io/projected/69ee19f7-5399-4651-87c8-722ccfbb7e74-kube-api-access-gf54w\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888935 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j97m\" (UniqueName: \"kubernetes.io/projected/3d1c114a-859f-4dd2-8bd5-79f55b713703-kube-api-access-5j97m\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.888991 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-config\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889025 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce923c4d-997e-4746-bad1-1f611c9a67d6-config\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889058 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889093 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs6wm\" (UniqueName: \"kubernetes.io/projected/d8454588-9ece-42d7-a263-74f7026f4ebe-kube-api-access-bs6wm\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889105 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 
15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889280 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-config\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889410 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9986e410-984a-466f-bb26-b1644bc6c976-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889475 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-images\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.889586 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890267 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d5bba06-909e-4d1c-b379-53015d91e3fd-config-volume\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890344 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890435 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890489 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/446fb8a2-da33-4281-a0bf-98d3450a22e7-secret-volume\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890490 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9986e410-984a-466f-bb26-b1644bc6c976-ca-trust-extracted\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890536 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2a8c8951-a5bf-45bc-ae21-bea34aee0143-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-b74pk\" (UID: \"2a8c8951-a5bf-45bc-ae21-bea34aee0143\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890615 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbrbp\" (UniqueName: \"kubernetes.io/projected/3b7d4779-2c4c-4555-982c-f79ed9d7474c-kube-api-access-sbrbp\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890760 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzxw9\" (UniqueName: \"kubernetes.io/projected/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-kube-api-access-kzxw9\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890816 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890843 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891052 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891249 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-config\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891340 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.890828 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-serving-cert\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891509 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891573 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23a37a68-f3a2-451a-b7ab-714614d77140-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891627 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57f5f\" (UniqueName: \"kubernetes.io/projected/6feef64c-3e2b-4f00-a30c-e35cb976384d-kube-api-access-57f5f\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891743 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-etcd-client\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891791 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adb7407f-43e9-4089-8e7d-64a390f510af-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891833 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce923c4d-997e-4746-bad1-1f611c9a67d6-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.891965 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgdpx\" (UniqueName: \"kubernetes.io/projected/cfe5e77c-7835-4193-b9ed-1df72669ea3d-kube-api-access-hgdpx\") pod \"migrator-59844c95c7-l97vm\" (UID: 
\"cfe5e77c-7835-4193-b9ed-1df72669ea3d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892006 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-dir\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892064 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-client\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892133 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-image-import-ca\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892193 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs8nh\" (UniqueName: \"kubernetes.io/projected/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-kube-api-access-zs8nh\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892243 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-service-ca-bundle\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892359 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6feef64c-3e2b-4f00-a30c-e35cb976384d-metrics-tls\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892446 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-config\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892536 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqvnr\" (UniqueName: \"kubernetes.io/projected/68828445-ea1e-4df1-ba75-76f1179b5341-kube-api-access-fqvnr\") pod \"package-server-manager-789f6589d5-qh68l\" (UID: \"68828445-ea1e-4df1-ba75-76f1179b5341\") " 
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892619 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.892684 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6feef64c-3e2b-4f00-a30c-e35cb976384d-trusted-ca\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.893247 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-audit\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.894226 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-certs\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.894515 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a0024c1b-b48e-4609-b132-e7078313d8ae-webhook-cert\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.894607 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.894680 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-node-bootstrap-token\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.895539 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.895698 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db957cb2-16f7-4282-9209-a3228efb3c20-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.895787 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-image-import-ca\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.895934 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1febbf8b-6b1c-444e-ab84-5bebe58cb635-srv-cert\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.896451 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-trusted-ca\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.896567 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-default-certificate\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.896578 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-config\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.896816 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prbsf\" (UniqueName: \"kubernetes.io/projected/a0024c1b-b48e-4609-b132-e7078313d8ae-kube-api-access-prbsf\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897049 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1383de27-90fb-498e-8e3e-b622760bfb96-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sxvhn\" (UID: \"1383de27-90fb-498e-8e3e-b622760bfb96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897306 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897382 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shwqc\" (UniqueName: \"kubernetes.io/projected/1383de27-90fb-498e-8e3e-b622760bfb96-kube-api-access-shwqc\") pod \"control-plane-machine-set-operator-78cbb6b69f-sxvhn\" (UID: \"1383de27-90fb-498e-8e3e-b622760bfb96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897483 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db957cb2-16f7-4282-9209-a3228efb3c20-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897536 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmfwm\" (UniqueName: \"kubernetes.io/projected/1febbf8b-6b1c-444e-ab84-5bebe58cb635-kube-api-access-mmfwm\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897636 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a0024c1b-b48e-4609-b132-e7078313d8ae-tmpfs\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.897809 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1febbf8b-6b1c-444e-ab84-5bebe58cb635-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898383 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f99t2\" (UniqueName: \"kubernetes.io/projected/c18f86a9-0cef-41d3-a371-dfcbb46f837f-kube-api-access-f99t2\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898446 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a0024c1b-b48e-4609-b132-e7078313d8ae-apiservice-cert\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898522 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/68828445-ea1e-4df1-ba75-76f1179b5341-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qh68l\" (UID: \"68828445-ea1e-4df1-ba75-76f1179b5341\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898645 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7qb5\" (UniqueName: \"kubernetes.io/projected/7e26d790-6dd2-4e6e-8e21-8b791f39744e-kube-api-access-c7qb5\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898732 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/db957cb2-16f7-4282-9209-a3228efb3c20-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898810 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tlhzt\" (UniqueName: \"kubernetes.io/projected/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-kube-api-access-tlhzt\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.898880 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-policies\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899010 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899121 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c18f86a9-0cef-41d3-a371-dfcbb46f837f-audit-dir\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899279 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a37a68-f3a2-451a-b7ab-714614d77140-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:40 
crc kubenswrapper[5010]: I1126 15:28:40.899291 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c18f86a9-0cef-41d3-a371-dfcbb46f837f-audit-dir\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899394 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-serving-cert\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899500 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-client-ca\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899632 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899788 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7whz\" (UniqueName: \"kubernetes.io/projected/adb7407f-43e9-4089-8e7d-64a390f510af-kube-api-access-h7whz\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899881 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9bpr\" (UniqueName: \"kubernetes.io/projected/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-kube-api-access-h9bpr\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.899935 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.900002 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz5rq\" (UniqueName: \"kubernetes.io/projected/f09f1157-dffe-47b5-8241-083a8b5ed7a9-kube-api-access-bz5rq\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.900090 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-config\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.900161 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a37a68-f3a2-451a-b7ab-714614d77140-config\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.900829 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9986e410-984a-466f-bb26-b1644bc6c976-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.900919 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-bound-sa-token\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.900989 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tq9m\" (UniqueName: \"kubernetes.io/projected/161483a8-ced1-4cb7-9b9f-e4d2e983b901-kube-api-access-8tq9m\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.901345 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-client-ca\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.902356 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-trusted-ca-bundle\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.902357 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6feef64c-3e2b-4f00-a30c-e35cb976384d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.902655 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.902942 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f09f1157-dffe-47b5-8241-083a8b5ed7a9-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.903003 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqjhs\" (UniqueName: \"kubernetes.io/projected/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-kube-api-access-tqjhs\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.903104 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-registry-tls\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.903296 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c18f86a9-0cef-41d3-a371-dfcbb46f837f-node-pullsecrets\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.903526 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/c18f86a9-0cef-41d3-a371-dfcbb46f837f-node-pullsecrets\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.903516 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-socket-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.904005 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.904420 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: 
\"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.904631 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f09f1157-dffe-47b5-8241-083a8b5ed7a9-srv-cert\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.904852 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-registration-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905191 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v477m\" (UniqueName: \"kubernetes.io/projected/2a8c8951-a5bf-45bc-ae21-bea34aee0143-kube-api-access-v477m\") pod \"multus-admission-controller-857f4d67dd-b74pk\" (UID: \"2a8c8951-a5bf-45bc-ae21-bea34aee0143\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905390 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d5bba06-909e-4d1c-b379-53015d91e3fd-metrics-tls\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905535 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-metrics-certs\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905786 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-encryption-config\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905892 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905886 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-trusted-ca\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.905992 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-etcd-serving-ca\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906392 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9qls\" (UniqueName: \"kubernetes.io/projected/446fb8a2-da33-4281-a0bf-98d3450a22e7-kube-api-access-d9qls\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906482 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:40 crc kubenswrapper[5010]: E1126 15:28:40.906499 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.406446791 +0000 UTC m=+142.197163989 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906576 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-stats-auth\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906630 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-csi-data-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906682 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-proxy-tls\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906806 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-ca\") pod \"etcd-operator-b45778765-fgc69\" 
(UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906886 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-registry-certificates\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.906967 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-plugins-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907016 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/161483a8-ced1-4cb7-9b9f-e4d2e983b901-cert\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907072 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907126 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-serving-cert\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907208 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69ee19f7-5399-4651-87c8-722ccfbb7e74-serving-cert\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907307 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907459 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/446fb8a2-da33-4281-a0bf-98d3450a22e7-config-volume\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907523 
5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d8454588-9ece-42d7-a263-74f7026f4ebe-signing-cabundle\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907576 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d8454588-9ece-42d7-a263-74f7026f4ebe-signing-key\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.907617 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79qbg\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-kube-api-access-79qbg\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.908195 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9986e410-984a-466f-bb26-b1644bc6c976-installation-pull-secrets\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.908768 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-etcd-client\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.909867 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.910037 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c18f86a9-0cef-41d3-a371-dfcbb46f837f-etcd-serving-ca\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.912635 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-serving-cert\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.912684 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-registry-certificates\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.915893 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: 
\"kubernetes.io/secret/c18f86a9-0cef-41d3-a371-dfcbb46f837f-encryption-config\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.916199 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-registry-tls\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.928593 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.948679 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.969842 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 15:28:40 crc kubenswrapper[5010]: I1126 15:28:40.988093 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.008425 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.008822 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.009096 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.509070482 +0000 UTC m=+142.299787650 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009128 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/2a8c8951-a5bf-45bc-ae21-bea34aee0143-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-b74pk\" (UID: \"2a8c8951-a5bf-45bc-ae21-bea34aee0143\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009175 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbrbp\" (UniqueName: \"kubernetes.io/projected/3b7d4779-2c4c-4555-982c-f79ed9d7474c-kube-api-access-sbrbp\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009212 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzxw9\" (UniqueName: \"kubernetes.io/projected/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-kube-api-access-kzxw9\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009265 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-serving-cert\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009302 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009337 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23a37a68-f3a2-451a-b7ab-714614d77140-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009368 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57f5f\" (UniqueName: \"kubernetes.io/projected/6feef64c-3e2b-4f00-a30c-e35cb976384d-kube-api-access-57f5f\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009403 
5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adb7407f-43e9-4089-8e7d-64a390f510af-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009432 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce923c4d-997e-4746-bad1-1f611c9a67d6-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009463 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgdpx\" (UniqueName: \"kubernetes.io/projected/cfe5e77c-7835-4193-b9ed-1df72669ea3d-kube-api-access-hgdpx\") pod \"migrator-59844c95c7-l97vm\" (UID: \"cfe5e77c-7835-4193-b9ed-1df72669ea3d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009494 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-dir\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009536 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-client\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009614 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs8nh\" (UniqueName: \"kubernetes.io/projected/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-kube-api-access-zs8nh\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009647 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-service-ca-bundle\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009684 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqvnr\" (UniqueName: \"kubernetes.io/projected/68828445-ea1e-4df1-ba75-76f1179b5341-kube-api-access-fqvnr\") pod \"package-server-manager-789f6589d5-qh68l\" (UID: \"68828445-ea1e-4df1-ba75-76f1179b5341\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009739 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6feef64c-3e2b-4f00-a30c-e35cb976384d-metrics-tls\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009836 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6feef64c-3e2b-4f00-a30c-e35cb976384d-trusted-ca\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009866 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-certs\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009900 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a0024c1b-b48e-4609-b132-e7078313d8ae-webhook-cert\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009930 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db957cb2-16f7-4282-9209-a3228efb3c20-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.009961 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010009 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-node-bootstrap-token\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010071 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1febbf8b-6b1c-444e-ab84-5bebe58cb635-srv-cert\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") 
" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010105 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-default-certificate\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010140 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1383de27-90fb-498e-8e3e-b622760bfb96-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sxvhn\" (UID: \"1383de27-90fb-498e-8e3e-b622760bfb96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010177 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prbsf\" (UniqueName: \"kubernetes.io/projected/a0024c1b-b48e-4609-b132-e7078313d8ae-kube-api-access-prbsf\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010222 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010257 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shwqc\" (UniqueName: \"kubernetes.io/projected/1383de27-90fb-498e-8e3e-b622760bfb96-kube-api-access-shwqc\") pod \"control-plane-machine-set-operator-78cbb6b69f-sxvhn\" (UID: \"1383de27-90fb-498e-8e3e-b622760bfb96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010290 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db957cb2-16f7-4282-9209-a3228efb3c20-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010323 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmfwm\" (UniqueName: \"kubernetes.io/projected/1febbf8b-6b1c-444e-ab84-5bebe58cb635-kube-api-access-mmfwm\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010352 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a0024c1b-b48e-4609-b132-e7078313d8ae-tmpfs\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010394 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1febbf8b-6b1c-444e-ab84-5bebe58cb635-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010433 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/68828445-ea1e-4df1-ba75-76f1179b5341-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qh68l\" (UID: \"68828445-ea1e-4df1-ba75-76f1179b5341\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010473 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a0024c1b-b48e-4609-b132-e7078313d8ae-apiservice-cert\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010507 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/db957cb2-16f7-4282-9209-a3228efb3c20-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010540 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7qb5\" (UniqueName: \"kubernetes.io/projected/7e26d790-6dd2-4e6e-8e21-8b791f39744e-kube-api-access-c7qb5\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010578 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tlhzt\" (UniqueName: \"kubernetes.io/projected/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-kube-api-access-tlhzt\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010611 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-policies\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010645 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010695 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a37a68-f3a2-451a-b7ab-714614d77140-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010762 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7whz\" (UniqueName: \"kubernetes.io/projected/adb7407f-43e9-4089-8e7d-64a390f510af-kube-api-access-h7whz\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010799 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010860 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010920 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bz5rq\" (UniqueName: \"kubernetes.io/projected/f09f1157-dffe-47b5-8241-083a8b5ed7a9-kube-api-access-bz5rq\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010955 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-config\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.010988 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a37a68-f3a2-451a-b7ab-714614d77140-config\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011043 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tq9m\" (UniqueName: \"kubernetes.io/projected/161483a8-ced1-4cb7-9b9f-e4d2e983b901-kube-api-access-8tq9m\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " 
pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011078 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6feef64c-3e2b-4f00-a30c-e35cb976384d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011115 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011182 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f09f1157-dffe-47b5-8241-083a8b5ed7a9-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011226 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqjhs\" (UniqueName: \"kubernetes.io/projected/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-kube-api-access-tqjhs\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011267 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-socket-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011303 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-registration-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011340 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v477m\" (UniqueName: \"kubernetes.io/projected/2a8c8951-a5bf-45bc-ae21-bea34aee0143-kube-api-access-v477m\") pod \"multus-admission-controller-857f4d67dd-b74pk\" (UID: \"2a8c8951-a5bf-45bc-ae21-bea34aee0143\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011376 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011407 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f09f1157-dffe-47b5-8241-083a8b5ed7a9-srv-cert\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011444 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d5bba06-909e-4d1c-b379-53015d91e3fd-metrics-tls\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011482 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-metrics-certs\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011550 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011591 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9qls\" (UniqueName: \"kubernetes.io/projected/446fb8a2-da33-4281-a0bf-98d3450a22e7-kube-api-access-d9qls\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011628 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-stats-auth\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011664 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-csi-data-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011700 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-proxy-tls\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011776 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-ca\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" 
Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011812 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-plugins-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011846 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/161483a8-ced1-4cb7-9b9f-e4d2e983b901-cert\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011881 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69ee19f7-5399-4651-87c8-722ccfbb7e74-serving-cert\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011917 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011960 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.011997 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/446fb8a2-da33-4281-a0bf-98d3450a22e7-config-volume\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012036 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d8454588-9ece-42d7-a263-74f7026f4ebe-signing-cabundle\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012070 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d8454588-9ece-42d7-a263-74f7026f4ebe-signing-key\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012131 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-service-ca\") pod \"etcd-operator-b45778765-fgc69\" (UID: 
\"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012170 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-mountpoint-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012207 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012257 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwh9t\" (UniqueName: \"kubernetes.io/projected/7d5bba06-909e-4d1c-b379-53015d91e3fd-kube-api-access-qwh9t\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012291 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adb7407f-43e9-4089-8e7d-64a390f510af-proxy-tls\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012326 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce923c4d-997e-4746-bad1-1f611c9a67d6-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012360 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp448\" (UniqueName: \"kubernetes.io/projected/5a3ecd68-6560-460d-b6ac-53209faecbd8-kube-api-access-vp448\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012396 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gf54w\" (UniqueName: \"kubernetes.io/projected/69ee19f7-5399-4651-87c8-722ccfbb7e74-kube-api-access-gf54w\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012430 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j97m\" (UniqueName: \"kubernetes.io/projected/3d1c114a-859f-4dd2-8bd5-79f55b713703-kube-api-access-5j97m\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012467 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-config\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012507 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce923c4d-997e-4746-bad1-1f611c9a67d6-config\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012541 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012576 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs6wm\" (UniqueName: \"kubernetes.io/projected/d8454588-9ece-42d7-a263-74f7026f4ebe-kube-api-access-bs6wm\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012628 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-images\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012664 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012734 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012774 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d5bba06-909e-4d1c-b379-53015d91e3fd-config-volume\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012813 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012848 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/446fb8a2-da33-4281-a0bf-98d3450a22e7-secret-volume\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.012969 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-service-ca-bundle\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.014002 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db957cb2-16f7-4282-9209-a3228efb3c20-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.014647 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/a0024c1b-b48e-4609-b132-e7078313d8ae-tmpfs\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.014881 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-csi-data-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.015341 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-registration-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.016278 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-config\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.016286 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-socket-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.017037 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/23a37a68-f3a2-451a-b7ab-714614d77140-config\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.017523 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.017910 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6feef64c-3e2b-4f00-a30c-e35cb976384d-metrics-tls\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.018288 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-auth-proxy-config\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.018666 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.518648642 +0000 UTC m=+142.309365800 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.019179 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6feef64c-3e2b-4f00-a30c-e35cb976384d-trusted-ca\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.019490 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.019935 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/db957cb2-16f7-4282-9209-a3228efb3c20-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.021681 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-stats-auth\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.021824 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-dir\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.022036 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-ca\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.022135 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.022656 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/2a8c8951-a5bf-45bc-ae21-bea34aee0143-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-b74pk\" (UID: \"2a8c8951-a5bf-45bc-ae21-bea34aee0143\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.022840 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-plugins-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.022957 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/5a3ecd68-6560-460d-b6ac-53209faecbd8-mountpoint-dir\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.023015 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-service-ca\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.023037 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/adb7407f-43e9-4089-8e7d-64a390f510af-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.023864 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/23a37a68-f3a2-451a-b7ab-714614d77140-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.024303 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/1383de27-90fb-498e-8e3e-b622760bfb96-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-sxvhn\" (UID: \"1383de27-90fb-498e-8e3e-b622760bfb96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.024696 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-metrics-certs\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.025119 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-default-certificate\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " 
pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.025860 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/69ee19f7-5399-4651-87c8-722ccfbb7e74-etcd-client\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.026385 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/adb7407f-43e9-4089-8e7d-64a390f510af-proxy-tls\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.026583 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.029946 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.034914 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/69ee19f7-5399-4651-87c8-722ccfbb7e74-serving-cert\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.048042 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.075419 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.085065 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.093933 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.102137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.108976 5010 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.114105 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.114366 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.614335967 +0000 UTC m=+142.405053155 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.115168 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.115635 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.615614543 +0000 UTC m=+142.406331751 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.120987 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.129627 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.140678 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.148489 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.168781 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.188473 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.200669 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.208212 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.212975 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-policies\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.217073 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.217318 5010 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.717280216 +0000 UTC m=+142.507997394 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.217774 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.218589 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.718553582 +0000 UTC m=+142.509270760 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.246035 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.248664 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.251868 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.253445 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.267618 5010 request.go:700] Waited for 1.017500898s due to client-side throttling, not priority and fairness, request: 
GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/secrets?fieldSelector=metadata.name%3Dv4-0-config-user-template-provider-selection&limit=500&resourceVersion=0 Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.270378 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.282236 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.288986 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.296907 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.309551 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.318594 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.319414 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.320305 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.820276117 +0000 UTC m=+142.610993295 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.320536 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.321261 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.821231544 +0000 UTC m=+142.611948732 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.329597 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.349631 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.362389 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.369253 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.390385 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.409283 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.422658 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.422781 5010 
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.423030 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.423557 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.923524315 +0000 UTC m=+142.714241503 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.423992 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.424490 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:41.924473512 +0000 UTC m=+142.715190690 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.429011 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.443192 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/68828445-ea1e-4df1-ba75-76f1179b5341-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-qh68l\" (UID: \"68828445-ea1e-4df1-ba75-76f1179b5341\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.447898 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.457935 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-images\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.470188 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.479580 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-proxy-tls\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.489224 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.507884 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.517485 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1febbf8b-6b1c-444e-ab84-5bebe58cb635-srv-cert\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.525972 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.526974 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.026956539 +0000 UTC m=+142.817673707 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.528997 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.539148 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/446fb8a2-da33-4281-a0bf-98d3450a22e7-secret-volume\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.539979 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1febbf8b-6b1c-444e-ab84-5bebe58cb635-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.541022 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f09f1157-dffe-47b5-8241-083a8b5ed7a9-profile-collector-cert\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.550099 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.561000 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f09f1157-dffe-47b5-8241-083a8b5ed7a9-srv-cert\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.569309 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.578288 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a0024c1b-b48e-4609-b132-e7078313d8ae-webhook-cert\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.582081 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a0024c1b-b48e-4609-b132-e7078313d8ae-apiservice-cert\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.589248 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.609217 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.630309 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.631346 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.131315278 +0000 UTC m=+142.922032456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.632135 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.648880 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.661160 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-serving-cert\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.668141 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.679623 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-config\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.689576 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 
26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.708105 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.728614 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.733413 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.733917 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.233891768 +0000 UTC m=+143.024608926 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.733998 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.734542 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.234524525 +0000 UTC m=+143.025241713 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.741807 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/d8454588-9ece-42d7-a263-74f7026f4ebe-signing-key\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.749920 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.754234 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/d8454588-9ece-42d7-a263-74f7026f4ebe-signing-cabundle\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.767970 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.789256 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.797933 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ce923c4d-997e-4746-bad1-1f611c9a67d6-config\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.808276 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.828678 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.835117 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.835654 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.335623933 +0000 UTC m=+143.126341121 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.836048 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ce923c4d-997e-4746-bad1-1f611c9a67d6-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.848912 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.867924 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.871844 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/446fb8a2-da33-4281-a0bf-98d3450a22e7-config-volume\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.888741 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.907922 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.928786 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.937359 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7d5bba06-909e-4d1c-b379-53015d91e3fd-config-volume\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.937408 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:41 crc kubenswrapper[5010]: E1126 15:28:41.937742 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.437727618 +0000 UTC m=+143.228444766 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.948756 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.960652 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/7d5bba06-909e-4d1c-b379-53015d91e3fd-metrics-tls\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.969009 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 15:28:41 crc kubenswrapper[5010]: I1126 15:28:41.988138 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.007383 5010 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.019243 5010 secret.go:188] Couldn't get secret openshift-machine-config-operator/machine-config-server-tls: failed to sync secret cache: timed out waiting for the condition Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.019962 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-certs podName:3b7d4779-2c4c-4555-982c-f79ed9d7474c nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.519937052 +0000 UTC m=+143.310654200 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "certs" (UniqueName: "kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-certs") pod "machine-config-server-rjtpj" (UID: "3b7d4779-2c4c-4555-982c-f79ed9d7474c") : failed to sync secret cache: timed out waiting for the condition Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.020151 5010 secret.go:188] Couldn't get secret openshift-machine-config-operator/node-bootstrapper-token: failed to sync secret cache: timed out waiting for the condition Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.020337 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-node-bootstrap-token podName:3b7d4779-2c4c-4555-982c-f79ed9d7474c nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.520297342 +0000 UTC m=+143.311014560 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "node-bootstrap-token" (UniqueName: "kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-node-bootstrap-token") pod "machine-config-server-rjtpj" (UID: "3b7d4779-2c4c-4555-982c-f79ed9d7474c") : failed to sync secret cache: timed out waiting for the condition Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.023623 5010 secret.go:188] Couldn't get secret openshift-ingress-canary/canary-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.023769 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/161483a8-ced1-4cb7-9b9f-e4d2e983b901-cert podName:161483a8-ced1-4cb7-9b9f-e4d2e983b901 nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.523733389 +0000 UTC m=+143.314450627 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/161483a8-ced1-4cb7-9b9f-e4d2e983b901-cert") pod "ingress-canary-wjjlq" (UID: "161483a8-ced1-4cb7-9b9f-e4d2e983b901") : failed to sync secret cache: timed out waiting for the condition Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.028485 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.038668 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.038932 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.538897848 +0000 UTC m=+143.329615006 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.040840 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.041285 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.541272084 +0000 UTC m=+143.331989242 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.048010 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.068537 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.089340 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.109340 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.127856 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.142377 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.142599 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.642566167 +0000 UTC m=+143.433283325 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.143816 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.144265 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.644254355 +0000 UTC m=+143.434971513 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.148625 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.197783 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l8xdv\" (UniqueName: \"kubernetes.io/projected/ed362947-89a1-4af0-843c-fde4fd5b61ec-kube-api-access-l8xdv\") pod \"cluster-samples-operator-665b6dd947-l7l7k\" (UID: \"ed362947-89a1-4af0-843c-fde4fd5b61ec\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.216827 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4th5\" (UniqueName: \"kubernetes.io/projected/0a0e77df-c904-48f2-a303-0c024c1fd066-kube-api-access-n4th5\") pod \"apiserver-7bbb656c7d-6bsj2\" (UID: \"0a0e77df-c904-48f2-a303-0c024c1fd066\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.237093 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztbxj\" (UniqueName: \"kubernetes.io/projected/80e56b90-699c-4fcd-b69a-748b192fce11-kube-api-access-ztbxj\") pod \"machine-api-operator-5694c8668f-f6hqp\" (UID: \"80e56b90-699c-4fcd-b69a-748b192fce11\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.245021 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.245310 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.74527052 +0000 UTC m=+143.535987708 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.246394 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.247179 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.747150883 +0000 UTC m=+143.537868071 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.256985 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lm8gv\" (UniqueName: \"kubernetes.io/projected/f0d44623-c021-45d4-bc90-b40247ec17ef-kube-api-access-lm8gv\") pod \"console-f9d7485db-rh2vd\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.265497 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9r9t\" (UniqueName: \"kubernetes.io/projected/a544cc25-5303-452d-bbd3-5ac22b642ad7-kube-api-access-x9r9t\") pod \"authentication-operator-69f744f599-zbm5j\" (UID: \"a544cc25-5303-452d-bbd3-5ac22b642ad7\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.280518 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.286825 5010 request.go:700] Waited for 1.906118512s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-config-operator/serviceaccounts/openshift-config-operator/token Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.297842 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwmm4\" (UniqueName: \"kubernetes.io/projected/a33ae656-009d-4adb-80ef-143cb00bba21-kube-api-access-lwmm4\") pod \"route-controller-manager-6576b87f9c-tmngt\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.317015 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqp46\" (UniqueName: \"kubernetes.io/projected/8c454fe1-8825-4c5f-a145-727f16df4b00-kube-api-access-fqp46\") pod \"openshift-config-operator-7777fb866f-9xndz\" (UID: \"8c454fe1-8825-4c5f-a145-727f16df4b00\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.329988 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64xdn\" (UniqueName: \"kubernetes.io/projected/72d7809b-b34d-4536-b98d-44cd347e4b67-kube-api-access-64xdn\") pod \"console-operator-58897d9998-mwpwb\" (UID: \"72d7809b-b34d-4536-b98d-44cd347e4b67\") " pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.345911 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.347391 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.347593 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.847563271 +0000 UTC m=+143.638280449 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.347895 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.348224 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.348885 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.848854177 +0000 UTC m=+143.639571345 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.358416 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm8tl\" (UniqueName: \"kubernetes.io/projected/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-kube-api-access-wm8tl\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.358745 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.365826 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.368544 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhcb4\" (UniqueName: \"kubernetes.io/projected/fe4b6236-b05e-415e-ae6b-3404c7562f99-kube-api-access-zhcb4\") pod \"openshift-controller-manager-operator-756b6f6bc6-jjtsh\" (UID: \"fe4b6236-b05e-415e-ae6b-3404c7562f99\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.373173 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.384830 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.398275 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tg6gb\" (UniqueName: \"kubernetes.io/projected/718d3669-c82b-4c98-aff8-ea8862a17dca-kube-api-access-tg6gb\") pod \"machine-approver-56656f9798-h7gv8\" (UID: \"718d3669-c82b-4c98-aff8-ea8862a17dca\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.422902 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.428305 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5bs9\" (UniqueName: \"kubernetes.io/projected/4e14de8c-da99-4612-92ec-50f74d50c547-kube-api-access-j5bs9\") pod \"dns-operator-744455d44c-xzsf5\" (UID: \"4e14de8c-da99-4612-92ec-50f74d50c547\") " pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.438380 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.440245 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csm9n\" (UniqueName: \"kubernetes.io/projected/ed24d6fa-b5ba-445c-8e3c-cba4963b89f8-kube-api-access-csm9n\") pod \"openshift-apiserver-operator-796bbdcf4f-ztgkq\" (UID: \"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.446329 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.449156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.449394 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.949358678 +0000 UTC m=+143.740075836 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.450212 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.451227 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:42.95120281 +0000 UTC m=+143.741919968 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.477840 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/46a864b2-355a-4c4f-bcf1-36f4e1eeec24-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-hhw28\" (UID: \"46a864b2-355a-4c4f-bcf1-36f4e1eeec24\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.488458 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.509009 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.525578 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz4cj\" (UniqueName: \"kubernetes.io/projected/22abed70-9135-4e67-a009-b013ada1f720-kube-api-access-lz4cj\") pod \"downloads-7954f5f757-p6kxm\" (UID: \"22abed70-9135-4e67-a009-b013ada1f720\") " pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.528564 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.552405 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc 
kubenswrapper[5010]: I1126 15:28:42.552522 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.552816 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/161483a8-ced1-4cb7-9b9f-e4d2e983b901-cert\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.552950 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.052912855 +0000 UTC m=+143.843630003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.553149 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-certs\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.553190 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-node-bootstrap-token\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.559934 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/161483a8-ced1-4cb7-9b9f-e4d2e983b901-cert\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.559985 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-certs\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.560423 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/3b7d4779-2c4c-4555-982c-f79ed9d7474c-node-bootstrap-token\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.570086 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 15:28:42 crc kubenswrapper[5010]: 
I1126 15:28:42.590552 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.602330 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.625188 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f99t2\" (UniqueName: \"kubernetes.io/projected/c18f86a9-0cef-41d3-a371-dfcbb46f837f-kube-api-access-f99t2\") pod \"apiserver-76f77b778f-djqn5\" (UID: \"c18f86a9-0cef-41d3-a371-dfcbb46f837f\") " pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.648402 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9bpr\" (UniqueName: \"kubernetes.io/projected/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-kube-api-access-h9bpr\") pod \"controller-manager-879f6c89f-6pfg9\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.656928 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.657448 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.157434549 +0000 UTC m=+143.948151697 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.667485 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-bound-sa-token\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.695877 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-f6hqp"] Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.696014 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79qbg\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-kube-api-access-79qbg\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.704560 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.705738 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqvnr\" (UniqueName: \"kubernetes.io/projected/68828445-ea1e-4df1-ba75-76f1179b5341-kube-api-access-fqvnr\") pod \"package-server-manager-789f6589d5-qh68l\" (UID: \"68828445-ea1e-4df1-ba75-76f1179b5341\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.713102 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.725099 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shwqc\" (UniqueName: \"kubernetes.io/projected/1383de27-90fb-498e-8e3e-b622760bfb96-kube-api-access-shwqc\") pod \"control-plane-machine-set-operator-78cbb6b69f-sxvhn\" (UID: \"1383de27-90fb-498e-8e3e-b622760bfb96\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.733860 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.737586 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" event={"ID":"718d3669-c82b-4c98-aff8-ea8862a17dca","Type":"ContainerStarted","Data":"dff8ef43e41775df741c1fc02487cfbcdbaee5d1e3c31ad0c7d5b63f6d7ac2ce"} Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.738635 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" event={"ID":"80e56b90-699c-4fcd-b69a-748b192fce11","Type":"ContainerStarted","Data":"2a4b6390b462783dcae1f6817e029523b764967c4d2747136e12e7a50b84ca79"} Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.755522 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmfwm\" (UniqueName: \"kubernetes.io/projected/1febbf8b-6b1c-444e-ab84-5bebe58cb635-kube-api-access-mmfwm\") pod \"catalog-operator-68c6474976-ncsqq\" (UID: \"1febbf8b-6b1c-444e-ab84-5bebe58cb635\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.758473 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.758668 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.258630329 +0000 UTC m=+144.049347477 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.758872 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.759287 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.259278257 +0000 UTC m=+144.049995395 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.761969 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ce923c4d-997e-4746-bad1-1f611c9a67d6-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-rgw5m\" (UID: \"ce923c4d-997e-4746-bad1-1f611c9a67d6\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.765267 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-mwpwb"] Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.786366 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp448\" (UniqueName: \"kubernetes.io/projected/5a3ecd68-6560-460d-b6ac-53209faecbd8-kube-api-access-vp448\") pod \"csi-hostpathplugin-tbs79\" (UID: \"5a3ecd68-6560-460d-b6ac-53209faecbd8\") " pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.800536 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gf54w\" (UniqueName: \"kubernetes.io/projected/69ee19f7-5399-4651-87c8-722ccfbb7e74-kube-api-access-gf54w\") pod \"etcd-operator-b45778765-fgc69\" (UID: \"69ee19f7-5399-4651-87c8-722ccfbb7e74\") " pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.816642 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.831656 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.835015 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j97m\" (UniqueName: \"kubernetes.io/projected/3d1c114a-859f-4dd2-8bd5-79f55b713703-kube-api-access-5j97m\") pod \"oauth-openshift-558db77b4-gw7ld\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.842900 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.863605 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.363584725 +0000 UTC m=+144.154301873 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.863627 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.864616 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.865586 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbrbp\" (UniqueName: \"kubernetes.io/projected/3b7d4779-2c4c-4555-982c-f79ed9d7474c-kube-api-access-sbrbp\") pod \"machine-config-server-rjtpj\" (UID: \"3b7d4779-2c4c-4555-982c-f79ed9d7474c\") " pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.866104 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.869348 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.369327258 +0000 UTC m=+144.160044406 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.874349 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.882513 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57f5f\" (UniqueName: \"kubernetes.io/projected/6feef64c-3e2b-4f00-a30c-e35cb976384d-kube-api-access-57f5f\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.889893 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.894543 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-rh2vd"] Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.896419 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzxw9\" (UniqueName: \"kubernetes.io/projected/5033a0ed-28fe-481b-b9a8-2f68a0fb3330-kube-api-access-kzxw9\") pod \"service-ca-operator-777779d784-q284v\" (UID: \"5033a0ed-28fe-481b-b9a8-2f68a0fb3330\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.901367 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-zbm5j"] Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.906090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/23a37a68-f3a2-451a-b7ab-714614d77140-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-mp6b2\" (UID: \"23a37a68-f3a2-451a-b7ab-714614d77140\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.910064 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.910592 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2"] Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.925491 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.927534 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tq9m\" (UniqueName: \"kubernetes.io/projected/161483a8-ced1-4cb7-9b9f-e4d2e983b901-kube-api-access-8tq9m\") pod \"ingress-canary-wjjlq\" (UID: \"161483a8-ced1-4cb7-9b9f-e4d2e983b901\") " pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.944847 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v477m\" (UniqueName: \"kubernetes.io/projected/2a8c8951-a5bf-45bc-ae21-bea34aee0143-kube-api-access-v477m\") pod \"multus-admission-controller-857f4d67dd-b74pk\" (UID: \"2a8c8951-a5bf-45bc-ae21-bea34aee0143\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.961543 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.967064 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.967200 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.467159603 +0000 UTC m=+144.257876751 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.967068 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6feef64c-3e2b-4f00-a30c-e35cb976384d-bound-sa-token\") pod \"ingress-operator-5b745b69d9-r8jf7\" (UID: \"6feef64c-3e2b-4f00-a30c-e35cb976384d\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.968786 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:42 crc kubenswrapper[5010]: E1126 15:28:42.969234 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.469218671 +0000 UTC m=+144.259935819 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.969671 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rjtpj" Nov 26 15:28:42 crc kubenswrapper[5010]: I1126 15:28:42.983423 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-wjjlq" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.002236 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqjhs\" (UniqueName: \"kubernetes.io/projected/08c96dd5-05ee-4c52-a832-a6b7ff2f0a47-kube-api-access-tqjhs\") pod \"machine-config-operator-74547568cd-dmrdb\" (UID: \"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.013131 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-xzsf5"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.014205 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs6wm\" (UniqueName: \"kubernetes.io/projected/d8454588-9ece-42d7-a263-74f7026f4ebe-kube-api-access-bs6wm\") pod \"service-ca-9c57cc56f-x97hz\" (UID: \"d8454588-9ece-42d7-a263-74f7026f4ebe\") " pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.016566 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.018213 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.029344 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/db957cb2-16f7-4282-9209-a3228efb3c20-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5jr82\" (UID: \"db957cb2-16f7-4282-9209-a3228efb3c20\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:43 crc kubenswrapper[5010]: W1126 15:28:43.029494 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0d44623_c021_45d4_bc90_b40247ec17ef.slice/crio-d2bf51c833c400124d6e5f186a8f683141613e09416f8a2633afd9bd4c85fce8 WatchSource:0}: Error finding container d2bf51c833c400124d6e5f186a8f683141613e09416f8a2633afd9bd4c85fce8: Status 404 returned error can't find the container with id d2bf51c833c400124d6e5f186a8f683141613e09416f8a2633afd9bd4c85fce8 Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.038446 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-9xndz"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.039980 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.050199 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7whz\" (UniqueName: \"kubernetes.io/projected/adb7407f-43e9-4089-8e7d-64a390f510af-kube-api-access-h7whz\") pod \"machine-config-controller-84d6567774-w5nrm\" (UID: \"adb7407f-43e9-4089-8e7d-64a390f510af\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.053283 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.064984 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.070005 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bz5rq\" (UniqueName: \"kubernetes.io/projected/f09f1157-dffe-47b5-8241-083a8b5ed7a9-kube-api-access-bz5rq\") pod \"olm-operator-6b444d44fb-vrdz2\" (UID: \"f09f1157-dffe-47b5-8241-083a8b5ed7a9\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.070381 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.070566 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.570535744 +0000 UTC m=+144.361252892 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.070627 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.070954 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.570944666 +0000 UTC m=+144.361661824 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.084116 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.086599 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tlhzt\" (UniqueName: \"kubernetes.io/projected/90ac104e-9059-4bf4-8d44-0ce8ffb5c08a-kube-api-access-tlhzt\") pod \"router-default-5444994796-ptfqn\" (UID: \"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a\") " pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.106356 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7qb5\" (UniqueName: \"kubernetes.io/projected/7e26d790-6dd2-4e6e-8e21-8b791f39744e-kube-api-access-c7qb5\") pod \"marketplace-operator-79b997595-mr9qp\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.124058 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.133119 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.134317 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9qls\" (UniqueName: \"kubernetes.io/projected/446fb8a2-da33-4281-a0bf-98d3450a22e7-kube-api-access-d9qls\") pod \"collect-profiles-29402835-m4cc6\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.140640 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.148399 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgdpx\" (UniqueName: \"kubernetes.io/projected/cfe5e77c-7835-4193-b9ed-1df72669ea3d-kube-api-access-hgdpx\") pod \"migrator-59844c95c7-l97vm\" (UID: \"cfe5e77c-7835-4193-b9ed-1df72669ea3d\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.151089 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.157396 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.171351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prbsf\" (UniqueName: \"kubernetes.io/projected/a0024c1b-b48e-4609-b132-e7078313d8ae-kube-api-access-prbsf\") pod \"packageserver-d55dfcdfc-qhhpw\" (UID: \"a0024c1b-b48e-4609-b132-e7078313d8ae\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.171665 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.172177 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.672158627 +0000 UTC m=+144.462875775 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.180161 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.183691 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs8nh\" (UniqueName: \"kubernetes.io/projected/39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13-kube-api-access-zs8nh\") pod \"kube-storage-version-migrator-operator-b67b599dd-pq92r\" (UID: \"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.200123 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.203571 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.206870 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwh9t\" (UniqueName: \"kubernetes.io/projected/7d5bba06-909e-4d1c-b379-53015d91e3fd-kube-api-access-qwh9t\") pod \"dns-default-x4zhd\" (UID: \"7d5bba06-909e-4d1c-b379-53015d91e3fd\") " pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.216194 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.236123 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.239091 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:43 crc kubenswrapper[5010]: W1126 15:28:43.271871 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b7d4779_2c4c_4555_982c_f79ed9d7474c.slice/crio-57da9cd5ffe3d1ed1d9969621c5f3ceb289ad5bbfebfb7e43628e7ce34e5b943 WatchSource:0}: Error finding container 57da9cd5ffe3d1ed1d9969621c5f3ceb289ad5bbfebfb7e43628e7ce34e5b943: Status 404 returned error can't find the container with id 57da9cd5ffe3d1ed1d9969621c5f3ceb289ad5bbfebfb7e43628e7ce34e5b943 Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.272993 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.273433 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.773390278 +0000 UTC m=+144.564107456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.365741 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.373465 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.374021 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.374635 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.874618979 +0000 UTC m=+144.665336127 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.376062 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-6pfg9"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.378580 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-p6kxm"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.379115 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.384962 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-djqn5"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.420222 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.446609 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.471285 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gw7ld"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.474289 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.476411 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.477024 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:43.977011293 +0000 UTC m=+144.767728441 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.584290 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.584688 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.084667775 +0000 UTC m=+144.875384923 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.585238 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.585898 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.08588737 +0000 UTC m=+144.876604518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.635780 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-tbs79"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.638667 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.640120 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-q284v"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.659875 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.688342 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.688578 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2"] Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.688764 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.188748757 +0000 UTC m=+144.979465895 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.733207 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mr9qp"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.733262 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-wjjlq"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.756681 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" event={"ID":"fe4b6236-b05e-415e-ae6b-3404c7562f99","Type":"ContainerStarted","Data":"9fb7d1a201e4700c2a6dd9867e89dee59d44991f8a4856c13588af30c4033442"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.775068 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" event={"ID":"ce923c4d-997e-4746-bad1-1f611c9a67d6","Type":"ContainerStarted","Data":"490def519cbfc90787ab35277fe8803cc8a6a9fe51a8fffd025a780c155d06ed"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.789744 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.790139 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.290127822 +0000 UTC m=+145.080844970 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.810675 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" event={"ID":"72d7809b-b34d-4536-b98d-44cd347e4b67","Type":"ContainerStarted","Data":"468bf95d17abdf4afff28e8eaa018509ca6a6646b87daed772dfacb2db1ce29f"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.810744 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" event={"ID":"72d7809b-b34d-4536-b98d-44cd347e4b67","Type":"ContainerStarted","Data":"7e0985bc2a87c7be239421096480bc15e8f470f3142c95c51be884dde4726c0b"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.811003 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.814392 5010 patch_prober.go:28] interesting pod/console-operator-58897d9998-mwpwb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/readyz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.814442 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" podUID="72d7809b-b34d-4536-b98d-44cd347e4b67" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/readyz\": dial tcp 10.217.0.21:8443: connect: connection refused" Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.816025 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" event={"ID":"3d1c114a-859f-4dd2-8bd5-79f55b713703","Type":"ContainerStarted","Data":"44e3efd94150d355b7b6de3a3b7e03e0c4a069f3259de69675815a30520c6b0b"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.820863 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rjtpj" event={"ID":"3b7d4779-2c4c-4555-982c-f79ed9d7474c","Type":"ContainerStarted","Data":"57da9cd5ffe3d1ed1d9969621c5f3ceb289ad5bbfebfb7e43628e7ce34e5b943"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.826274 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-b74pk"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.837607 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" event={"ID":"718d3669-c82b-4c98-aff8-ea8862a17dca","Type":"ContainerStarted","Data":"0909430abcc9f68c2c238bf9dbe21ddf44ac9cedae5d05e55b34ad1b1c475342"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.837652 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" 
event={"ID":"718d3669-c82b-4c98-aff8-ea8862a17dca","Type":"ContainerStarted","Data":"21efbe54cfbc75e6545113034ae64803c14f4efad0aebe2b3cc75b228a3785da"} Nov 26 15:28:43 crc kubenswrapper[5010]: W1126 15:28:43.841991 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a3ecd68_6560_460d_b6ac_53209faecbd8.slice/crio-900a77a7726084c5c417a96e9ffe54bc1c8252bbce544979ad80c2f097a49ea6 WatchSource:0}: Error finding container 900a77a7726084c5c417a96e9ffe54bc1c8252bbce544979ad80c2f097a49ea6: Status 404 returned error can't find the container with id 900a77a7726084c5c417a96e9ffe54bc1c8252bbce544979ad80c2f097a49ea6 Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.843216 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" event={"ID":"c18f86a9-0cef-41d3-a371-dfcbb46f837f","Type":"ContainerStarted","Data":"a388044e55fdb0899733db3e94e8891e41f64536ccfefd1674fb32d34eaf0b5f"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.847044 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" event={"ID":"80e56b90-699c-4fcd-b69a-748b192fce11","Type":"ContainerStarted","Data":"0e559f71ef88d0de46f072a3a2c5f52b1218726984e6dfb187001a34106db844"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.851207 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" event={"ID":"a33ae656-009d-4adb-80ef-143cb00bba21","Type":"ContainerStarted","Data":"97f5628523486d97e9f108aead5e237da573a3396a869d062e236dcd9d703967"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.854548 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" event={"ID":"46a864b2-355a-4c4f-bcf1-36f4e1eeec24","Type":"ContainerStarted","Data":"1328332f30a7ee7e03b7dc9725135137294f6d7da7c3e47c5ce773fd3790fd18"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.855675 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" event={"ID":"4e14de8c-da99-4612-92ec-50f74d50c547","Type":"ContainerStarted","Data":"50667f8eb2b698d138929a47f6b22867948260df95dac602cbdf9a541562694c"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.861804 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" event={"ID":"68828445-ea1e-4df1-ba75-76f1179b5341","Type":"ContainerStarted","Data":"586689777fbdf59bf56053e3f333dc17e14aba0d4c6c1c1de026d0c020b43a2e"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.863507 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" event={"ID":"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8","Type":"ContainerStarted","Data":"e880475491a033f217f4fbe1ffab65294badb83c1ff004ba388d0ce067eb8d21"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.868469 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.871402 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" 
event={"ID":"6ef3bb6f-39b5-4919-90a4-897d4841c9f1","Type":"ContainerStarted","Data":"3942d6ad7fb137be45830c50e48b0ae9193e5c43df15ae4ef7b0eef04bbec242"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.873138 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" event={"ID":"8c454fe1-8825-4c5f-a145-727f16df4b00","Type":"ContainerStarted","Data":"1bbebbd6cb50a8c19f2d96ff1e6f6a30dd5dc9ea6938cdadb0002422eaf0035a"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.881516 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rh2vd" event={"ID":"f0d44623-c021-45d4-bc90-b40247ec17ef","Type":"ContainerStarted","Data":"d2bf51c833c400124d6e5f186a8f683141613e09416f8a2633afd9bd4c85fce8"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.886432 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" event={"ID":"0a0e77df-c904-48f2-a303-0c024c1fd066","Type":"ContainerStarted","Data":"2fed783c55edba4a6ec7bf18339e8028eeaa0b4bea7ece83961b52c698d1dc95"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.895519 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.897948 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:43 crc kubenswrapper[5010]: E1126 15:28:43.899200 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.399184175 +0000 UTC m=+145.189901323 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.938532 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" event={"ID":"1383de27-90fb-498e-8e3e-b622760bfb96","Type":"ContainerStarted","Data":"a00bb8ee470f0b5459ebee21b81476bde51c09779d1559bffa864e081acc034b"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.940922 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-p6kxm" event={"ID":"22abed70-9135-4e67-a009-b013ada1f720","Type":"ContainerStarted","Data":"bc13ed86f999e9c40a59e6ccdcf523948ceaf061ea89553f6b0904ed72db092b"} Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.973895 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6"] Nov 26 15:28:43 crc kubenswrapper[5010]: I1126 15:28:43.998880 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" event={"ID":"ed362947-89a1-4af0-843c-fde4fd5b61ec","Type":"ContainerStarted","Data":"7ba48c1f9d9add41e182cac858b7fa2654f1385a5583e25edf1b3ba16325e785"} Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.007199 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.008592 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" event={"ID":"a544cc25-5303-452d-bbd3-5ac22b642ad7","Type":"ContainerStarted","Data":"70905478adab51ca21fa063f5355be8fa0a999ac984b0fe636de06156ad0d355"} Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.012037 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.512011013 +0000 UTC m=+145.302728171 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.086816 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.109140 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.109684 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.609666794 +0000 UTC m=+145.400383942 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: W1126 15:28:44.158017 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf09f1157_dffe_47b5_8241_083a8b5ed7a9.slice/crio-411da64966e94dfcb617db5e553e63cf130ee32b7dbe9cc058143d80292f482e WatchSource:0}: Error finding container 411da64966e94dfcb617db5e553e63cf130ee32b7dbe9cc058143d80292f482e: Status 404 returned error can't find the container with id 411da64966e94dfcb617db5e553e63cf130ee32b7dbe9cc058143d80292f482e Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.180092 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-fgc69"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.211255 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.211620 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.711603215 +0000 UTC m=+145.502320363 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.231553 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.315080 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.315317 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.815277815 +0000 UTC m=+145.605994963 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.315505 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.316467 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.816458668 +0000 UTC m=+145.607175816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: W1126 15:28:44.351016 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcfe5e77c_7835_4193_b9ed_1df72669ea3d.slice/crio-a82a1ff6117bcd8e0b4c0eae52c47d237d157c80afdf921e19ee369b92fd6d9e WatchSource:0}: Error finding container a82a1ff6117bcd8e0b4c0eae52c47d237d157c80afdf921e19ee369b92fd6d9e: Status 404 returned error can't find the container with id a82a1ff6117bcd8e0b4c0eae52c47d237d157c80afdf921e19ee369b92fd6d9e Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.359314 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-x97hz"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.416975 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.417230 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.917183465 +0000 UTC m=+145.707900613 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.417744 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.418224 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:44.918197514 +0000 UTC m=+145.708914662 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: W1126 15:28:44.451916 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8454588_9ece_42d7_a263_74f7026f4ebe.slice/crio-0dc64e1c751f701fb6c82176174f48f2a38a6a1e0c5a1ab6b0cc265b070e35d4 WatchSource:0}: Error finding container 0dc64e1c751f701fb6c82176174f48f2a38a6a1e0c5a1ab6b0cc265b070e35d4: Status 404 returned error can't find the container with id 0dc64e1c751f701fb6c82176174f48f2a38a6a1e0c5a1ab6b0cc265b070e35d4 Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.496082 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.514967 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.519302 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.519636 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.01960106 +0000 UTC m=+145.810318218 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.519987 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.520534 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.020525676 +0000 UTC m=+145.811242814 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.622786 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.623739 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.123722052 +0000 UTC m=+145.914439210 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.666893 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-x4zhd"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.671782 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw"] Nov 26 15:28:44 crc kubenswrapper[5010]: W1126 15:28:44.676251 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb957cb2_16f7_4282_9209_a3228efb3c20.slice/crio-7f414fa5aac549cfa534230536d460723153258bf8909b404c55266792e0f53f WatchSource:0}: Error finding container 7f414fa5aac549cfa534230536d460723153258bf8909b404c55266792e0f53f: Status 404 returned error can't find the container with id 7f414fa5aac549cfa534230536d460723153258bf8909b404c55266792e0f53f Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.710470 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r"] Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.725132 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.725924 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 15:28:45.22589746 +0000 UTC m=+146.016614618 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.744833 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" podStartSLOduration=124.744809775 podStartE2EDuration="2m4.744809775s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:44.712532162 +0000 UTC m=+145.503249320" watchObservedRunningTime="2025-11-26 15:28:44.744809775 +0000 UTC m=+145.535526923" Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.826152 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.826763 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.32673816 +0000 UTC m=+146.117455308 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.828734 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.829139 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.329130388 +0000 UTC m=+146.119847536 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:44 crc kubenswrapper[5010]: I1126 15:28:44.929914 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:44 crc kubenswrapper[5010]: E1126 15:28:44.931063 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.431025278 +0000 UTC m=+146.221742426 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.034561 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" event={"ID":"a0024c1b-b48e-4609-b132-e7078313d8ae","Type":"ContainerStarted","Data":"4316252e24054448315e2e6b4acaffab13e9470fee74d7982d3b355e62231306"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.035815 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.037010 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.536986823 +0000 UTC m=+146.327704171 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.052470 5010 generic.go:334] "Generic (PLEG): container finished" podID="8c454fe1-8825-4c5f-a145-727f16df4b00" containerID="595d5234a2d52c9f9dfb69d7bf3acc14f06281ebefc64f9c132dcd3a756679e8" exitCode=0 Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.052590 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" event={"ID":"8c454fe1-8825-4c5f-a145-727f16df4b00","Type":"ContainerDied","Data":"595d5234a2d52c9f9dfb69d7bf3acc14f06281ebefc64f9c132dcd3a756679e8"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.073032 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rh2vd" event={"ID":"f0d44623-c021-45d4-bc90-b40247ec17ef","Type":"ContainerStarted","Data":"8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.088325 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-h7gv8" podStartSLOduration=126.084174186 podStartE2EDuration="2m6.084174186s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:44.862281375 +0000 UTC m=+145.652998523" watchObservedRunningTime="2025-11-26 15:28:45.084174186 +0000 UTC m=+145.874891334" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.088810 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" event={"ID":"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13","Type":"ContainerStarted","Data":"0a965b3991fdd740a9f1e53e6783415269a28f39e4bbb62dd100501134f73812"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.111405 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" event={"ID":"446fb8a2-da33-4281-a0bf-98d3450a22e7","Type":"ContainerStarted","Data":"05ed2c83ae3c218944dc1a4d86cde50a307f28e77065ce746efe55da7554c619"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.111461 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" event={"ID":"446fb8a2-da33-4281-a0bf-98d3450a22e7","Type":"ContainerStarted","Data":"fcd23aed1654bd290f517b5300f3ee91f19bcb0b6bd292512d3f728528a2e39a"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.120301 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ptfqn" event={"ID":"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a","Type":"ContainerStarted","Data":"897294c33034f01402548ffcfd2d78cb445eaff9c0c13a6871a08b4f3853d5e9"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.138603 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.139698 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.639682975 +0000 UTC m=+146.430400123 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.146448 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" podStartSLOduration=125.146430136 podStartE2EDuration="2m5.146430136s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.146148288 +0000 UTC m=+145.936865436" watchObservedRunningTime="2025-11-26 15:28:45.146430136 +0000 UTC m=+145.937147284" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.146853 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-rh2vd" podStartSLOduration=125.146848598 podStartE2EDuration="2m5.146848598s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.119762462 +0000 UTC m=+145.910479620" watchObservedRunningTime="2025-11-26 15:28:45.146848598 +0000 UTC m=+145.937565736" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.155056 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" event={"ID":"4e14de8c-da99-4612-92ec-50f74d50c547","Type":"ContainerStarted","Data":"95adf921f326272db1e31bd44b4a6957cb51c028f11306482e0acbafed0b838d"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.174986 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" event={"ID":"db957cb2-16f7-4282-9209-a3228efb3c20","Type":"ContainerStarted","Data":"7f414fa5aac549cfa534230536d460723153258bf8909b404c55266792e0f53f"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.186275 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" event={"ID":"f09f1157-dffe-47b5-8241-083a8b5ed7a9","Type":"ContainerStarted","Data":"411da64966e94dfcb617db5e553e63cf130ee32b7dbe9cc058143d80292f482e"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.202497 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" 
event={"ID":"7e26d790-6dd2-4e6e-8e21-8b791f39744e","Type":"ContainerStarted","Data":"da7466b664f286bad12abd26f9b06c34b6f1c06bbd4bd53154fca4cec40748e9"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.202769 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" event={"ID":"7e26d790-6dd2-4e6e-8e21-8b791f39744e","Type":"ContainerStarted","Data":"d0651332f018ba1df07392f32ba6557ada76566ccc7718be65a99273e2530240"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.204030 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.211850 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" event={"ID":"5a3ecd68-6560-460d-b6ac-53209faecbd8","Type":"ContainerStarted","Data":"900a77a7726084c5c417a96e9ffe54bc1c8252bbce544979ad80c2f097a49ea6"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.218667 5010 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mr9qp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.219095 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.221777 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-x4zhd" event={"ID":"7d5bba06-909e-4d1c-b379-53015d91e3fd","Type":"ContainerStarted","Data":"c43b5e2ddd1601af449d0cfda45eb93dc7cdd1641dad439220274472453b0b7c"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.235302 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" podStartSLOduration=125.235281847 podStartE2EDuration="2m5.235281847s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.234921707 +0000 UTC m=+146.025638855" watchObservedRunningTime="2025-11-26 15:28:45.235281847 +0000 UTC m=+146.025998995" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.237325 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" event={"ID":"23a37a68-f3a2-451a-b7ab-714614d77140","Type":"ContainerStarted","Data":"0bf59cf61c19e4402a267d0f8debe198e4674668a0aa2c47e15fe2a7e8e89084"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.240242 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.242054 5010 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.742036628 +0000 UTC m=+146.532753776 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.246589 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" event={"ID":"ce923c4d-997e-4746-bad1-1f611c9a67d6","Type":"ContainerStarted","Data":"7c0f789902065c23371e86b0e77f195b561f2b9d67be3b7b0e52d2519124a56d"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.263408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" event={"ID":"80e56b90-699c-4fcd-b69a-748b192fce11","Type":"ContainerStarted","Data":"ce0495e0adccb20d884cf4cff7a4e1985eaba2cb28dd99fde4574c4f2207e355"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.294205 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" event={"ID":"1383de27-90fb-498e-8e3e-b622760bfb96","Type":"ContainerStarted","Data":"3b803e37ddf7355fc8eba1ddb1ca4a61cbac4d48cd5708cbe1908bc160c96012"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.298783 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" event={"ID":"ed24d6fa-b5ba-445c-8e3c-cba4963b89f8","Type":"ContainerStarted","Data":"53930a1b2358debf2b4adf55571c80817186d6802ff7d4459092d194dc57f39f"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.306979 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" event={"ID":"6ef3bb6f-39b5-4919-90a4-897d4841c9f1","Type":"ContainerStarted","Data":"c06034f69c2eaa634cd3534c7d7e3290a5173bc151be854a810b7cff67d2be0b"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.308115 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.311541 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" event={"ID":"a33ae656-009d-4adb-80ef-143cb00bba21","Type":"ContainerStarted","Data":"d7a2c84dd94c042320b47f83e8811322fb609bdff0b5e30570bf99dad5a0cd34"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.313369 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.320500 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" 
event={"ID":"d8454588-9ece-42d7-a263-74f7026f4ebe","Type":"ContainerStarted","Data":"0dc64e1c751f701fb6c82176174f48f2a38a6a1e0c5a1ab6b0cc265b070e35d4"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.322473 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-rgw5m" podStartSLOduration=125.322461891 podStartE2EDuration="2m5.322461891s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.321067962 +0000 UTC m=+146.111785110" watchObservedRunningTime="2025-11-26 15:28:45.322461891 +0000 UTC m=+146.113179039" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.323781 5010 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-6pfg9 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.323835 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" podUID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.325870 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" podStartSLOduration=125.325863267 podStartE2EDuration="2m5.325863267s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.274280069 +0000 UTC m=+146.064997217" watchObservedRunningTime="2025-11-26 15:28:45.325863267 +0000 UTC m=+146.116580415" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.343565 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.346114 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.846090979 +0000 UTC m=+146.636808127 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.347387 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rjtpj" event={"ID":"3b7d4779-2c4c-4555-982c-f79ed9d7474c","Type":"ContainerStarted","Data":"9dcf2aef4a73bbee340a6eb13a1c802041f3a283e754ccef9e7096866d9183cc"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.348979 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-f6hqp" podStartSLOduration=125.34895461 podStartE2EDuration="2m5.34895461s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.348232269 +0000 UTC m=+146.138949417" watchObservedRunningTime="2025-11-26 15:28:45.34895461 +0000 UTC m=+146.139671758" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.379097 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" event={"ID":"1febbf8b-6b1c-444e-ab84-5bebe58cb635","Type":"ContainerStarted","Data":"6e026e61a4147848abe034e2d65ea17be520cad48048b4e84c84b10c69d9d85d"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.379150 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" event={"ID":"1febbf8b-6b1c-444e-ab84-5bebe58cb635","Type":"ContainerStarted","Data":"4a8c4317bb94e38579136ed0b36bd6a3da5586f2effd2f09c9d7839f401f3897"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.379659 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.395530 5010 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ncsqq container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.395943 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" podUID="1febbf8b-6b1c-444e-ab84-5bebe58cb635" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.396128 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" event={"ID":"ed362947-89a1-4af0-843c-fde4fd5b61ec","Type":"ContainerStarted","Data":"3d0f1ef6e5043b2a5707dc1f2dac1fb286aa3c4bd9b0ed24941f996fa153750c"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.400635 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" podStartSLOduration=125.40062049 podStartE2EDuration="2m5.40062049s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.374470801 +0000 UTC m=+146.165187969" watchObservedRunningTime="2025-11-26 15:28:45.40062049 +0000 UTC m=+146.191337638" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.404549 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-sxvhn" podStartSLOduration=125.404538971 podStartE2EDuration="2m5.404538971s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.399834828 +0000 UTC m=+146.190551996" watchObservedRunningTime="2025-11-26 15:28:45.404538971 +0000 UTC m=+146.195256119" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.436500 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" event={"ID":"a544cc25-5303-452d-bbd3-5ac22b642ad7","Type":"ContainerStarted","Data":"ed72363125026e0b2fb8acdea18783699b18904e0dbe15d8e26ef40ce4de02ca"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.441415 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-wjjlq" event={"ID":"161483a8-ced1-4cb7-9b9f-e4d2e983b901","Type":"ContainerStarted","Data":"44861cd221ec959188e4bc0fdcc4d79d1f2fab65a9d8235ca31ba4b934c8b0f5"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.441577 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-wjjlq" event={"ID":"161483a8-ced1-4cb7-9b9f-e4d2e983b901","Type":"ContainerStarted","Data":"cbf1b8a3c2b2e0e2ddd439b04ae029bd7f677b63ee9e60950353c6b4fd30dbc8"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.446186 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.450831 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" podStartSLOduration=125.450803028 podStartE2EDuration="2m5.450803028s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.45050068 +0000 UTC m=+146.241217828" watchObservedRunningTime="2025-11-26 15:28:45.450803028 +0000 UTC m=+146.241520176" Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.451622 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:45.951608281 +0000 UTC m=+146.742325429 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.454079 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" event={"ID":"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47","Type":"ContainerStarted","Data":"1e946c2f60d2f4a6a36b468211a5eee7f6d6c6a8b426d0923eb7408210c0d15b"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.468022 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" event={"ID":"68828445-ea1e-4df1-ba75-76f1179b5341","Type":"ContainerStarted","Data":"8e9e993947ab1f8d87eaffabafeb6c4f0fd0e73c1a2b162b7e7336fbdc09c1bc"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.486788 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-rjtpj" podStartSLOduration=5.486759825 podStartE2EDuration="5.486759825s" podCreationTimestamp="2025-11-26 15:28:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.482978268 +0000 UTC m=+146.273695426" watchObservedRunningTime="2025-11-26 15:28:45.486759825 +0000 UTC m=+146.277476983" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.504296 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" event={"ID":"5033a0ed-28fe-481b-b9a8-2f68a0fb3330","Type":"ContainerStarted","Data":"f31671139702a0c248ef4bc4c49b48dd000d46b7f3567652c81a0c675baafd6a"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.504390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" event={"ID":"5033a0ed-28fe-481b-b9a8-2f68a0fb3330","Type":"ContainerStarted","Data":"87e6cf37b7ed02c8b51fd5ffd626e056a7765a5dfbf76c8565b0524aaf64b45c"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.515472 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-ztgkq" podStartSLOduration=126.515436605 podStartE2EDuration="2m6.515436605s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.51170268 +0000 UTC m=+146.302419828" watchObservedRunningTime="2025-11-26 15:28:45.515436605 +0000 UTC m=+146.306153763" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.516799 5010 generic.go:334] "Generic (PLEG): container finished" podID="0a0e77df-c904-48f2-a303-0c024c1fd066" containerID="1ff4299f1ee0ed5fc882e2deafba0efb02475163d28bbe41002d97fc49129a4c" exitCode=0 Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.517102 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" 
event={"ID":"0a0e77df-c904-48f2-a303-0c024c1fd066","Type":"ContainerDied","Data":"1ff4299f1ee0ed5fc882e2deafba0efb02475163d28bbe41002d97fc49129a4c"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.536360 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-p6kxm" event={"ID":"22abed70-9135-4e67-a009-b013ada1f720","Type":"ContainerStarted","Data":"e6f1ed7e5ed6bd8b5a4b862d6c2b633e5dbeda8a00f57c57d8f1aae330738203"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.536725 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.547374 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-q284v" podStartSLOduration=125.547347387 podStartE2EDuration="2m5.547347387s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.544455485 +0000 UTC m=+146.335172633" watchObservedRunningTime="2025-11-26 15:28:45.547347387 +0000 UTC m=+146.338064535" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.547873 5010 patch_prober.go:28] interesting pod/downloads-7954f5f757-p6kxm container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.547928 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-p6kxm" podUID="22abed70-9135-4e67-a009-b013ada1f720" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.572053 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.572237 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" event={"ID":"cfe5e77c-7835-4193-b9ed-1df72669ea3d","Type":"ContainerStarted","Data":"a82a1ff6117bcd8e0b4c0eae52c47d237d157c80afdf921e19ee369b92fd6d9e"} Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.572371 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.072351184 +0000 UTC m=+146.863068332 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.572806 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.576531 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.076514191 +0000 UTC m=+146.867231339 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.578067 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" podStartSLOduration=125.578038494 podStartE2EDuration="2m5.578038494s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.57327477 +0000 UTC m=+146.363991948" watchObservedRunningTime="2025-11-26 15:28:45.578038494 +0000 UTC m=+146.368755642" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.613528 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.641967 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" event={"ID":"fe4b6236-b05e-415e-ae6b-3404c7562f99","Type":"ContainerStarted","Data":"f433894a07005cb8c844010632ba1fe2050f802aa2886e85fc365020f75d3453"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.660223 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-zbm5j" podStartSLOduration=126.660203376 podStartE2EDuration="2m6.660203376s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.659011282 +0000 UTC m=+146.449728430" watchObservedRunningTime="2025-11-26 15:28:45.660203376 +0000 UTC m=+146.450920524" Nov 26 15:28:45 crc 
kubenswrapper[5010]: I1126 15:28:45.660339 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-wjjlq" podStartSLOduration=5.660333319 podStartE2EDuration="5.660333319s" podCreationTimestamp="2025-11-26 15:28:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.61184709 +0000 UTC m=+146.402564238" watchObservedRunningTime="2025-11-26 15:28:45.660333319 +0000 UTC m=+146.451050507" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.676534 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.677908 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.177891886 +0000 UTC m=+146.968609034 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.680422 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" event={"ID":"adb7407f-43e9-4089-8e7d-64a390f510af","Type":"ContainerStarted","Data":"ce005ec2199299f1dc8a6bdbe04659a5409cda2f5828896d5619808575e4bcec"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.720762 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-p6kxm" podStartSLOduration=125.720729136 podStartE2EDuration="2m5.720729136s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.70421934 +0000 UTC m=+146.494936498" watchObservedRunningTime="2025-11-26 15:28:45.720729136 +0000 UTC m=+146.511446294" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.750540 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" event={"ID":"6feef64c-3e2b-4f00-a30c-e35cb976384d","Type":"ContainerStarted","Data":"c4831a9256816aad978276963fcce53e85a3734d2527eb6a7176a4f1c7345ee8"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.769312 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" event={"ID":"46a864b2-355a-4c4f-bcf1-36f4e1eeec24","Type":"ContainerStarted","Data":"7df60a81ad389f535b7adbe2512b9f66a75dfa47da9613ddf6e9bc748a1317e1"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.787401 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.787900 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-jjtsh" podStartSLOduration=125.787881504 podStartE2EDuration="2m5.787881504s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.786521476 +0000 UTC m=+146.577238624" watchObservedRunningTime="2025-11-26 15:28:45.787881504 +0000 UTC m=+146.578598652" Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.797326 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.297306761 +0000 UTC m=+147.088023909 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.830731 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" event={"ID":"2a8c8951-a5bf-45bc-ae21-bea34aee0143","Type":"ContainerStarted","Data":"10bf60b117400a21bff32361731d563abf7d9b66605851b35bbc0353e3b81921"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.862378 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" event={"ID":"69ee19f7-5399-4651-87c8-722ccfbb7e74","Type":"ContainerStarted","Data":"ea53aae58a80c6e59ea097dcce7f4b771c6904fab990c0b965b8620163bb43d7"} Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.863558 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-hhw28" podStartSLOduration=125.863526192 podStartE2EDuration="2m5.863526192s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:45.856820323 +0000 UTC m=+146.647537491" watchObservedRunningTime="2025-11-26 15:28:45.863526192 +0000 UTC m=+146.654243340" Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.896927 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:45 crc kubenswrapper[5010]: E1126 15:28:45.897996 5010 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.397960755 +0000 UTC m=+147.188677903 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:45 crc kubenswrapper[5010]: I1126 15:28:45.963042 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-mwpwb" Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:45.999629 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.000550 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.500526894 +0000 UTC m=+147.291244062 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.105053 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.105305 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.605289025 +0000 UTC m=+147.396006173 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.105436 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.105814 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.60580535 +0000 UTC m=+147.396522498 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.208725 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.209070 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.709016607 +0000 UTC m=+147.499733745 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.209563 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.209953 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.709938873 +0000 UTC m=+147.500656021 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.310545 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.311026 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.81100977 +0000 UTC m=+147.601726918 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.413956 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.415295 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:46.915279287 +0000 UTC m=+147.705996435 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.527328 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.527654 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.027631532 +0000 UTC m=+147.818348680 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.629296 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.629733 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.129702267 +0000 UTC m=+147.920419415 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.731131 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.731481 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.231460503 +0000 UTC m=+148.022177651 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.731624 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.731943 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.231937306 +0000 UTC m=+148.022654454 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.834474 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.834861 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.334841965 +0000 UTC m=+148.125559113 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.935000 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" event={"ID":"5a3ecd68-6560-460d-b6ac-53209faecbd8","Type":"ContainerStarted","Data":"446651ad9c6f074e3dbabb48b1a3b0e39c07b99c0dfb97de5df268ba75f0574d"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.939755 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:46 crc kubenswrapper[5010]: E1126 15:28:46.940123 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.44010884 +0000 UTC m=+148.230825988 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.946350 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" event={"ID":"8c454fe1-8825-4c5f-a145-727f16df4b00","Type":"ContainerStarted","Data":"da060f546b5f61a5d456310a2e2a1d8a8cf06e8c4bd7882a3af30c0ddbb95beb"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.946617 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.953449 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" event={"ID":"cfe5e77c-7835-4193-b9ed-1df72669ea3d","Type":"ContainerStarted","Data":"bfacc0a63c8dfd976a08c868080c684740050f31b8a5d0e80d07a5ecd24adb08"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.953513 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" event={"ID":"cfe5e77c-7835-4193-b9ed-1df72669ea3d","Type":"ContainerStarted","Data":"0bfda0e0b1d7d665c3bf5c839ece8512dc7c8d7a4d3b4e7717ec3cc60694c5e8"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.973235 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" 
event={"ID":"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47","Type":"ContainerStarted","Data":"1be6dfc52ea874f5126eaa747c7da8f5c36c749c41f9533c4e985e5554e34021"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.973316 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" event={"ID":"08c96dd5-05ee-4c52-a832-a6b7ff2f0a47","Type":"ContainerStarted","Data":"5c7d4e7167115726e90d85755ffadb9346dd795ba65b9058ce3c748959af9dd6"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.976066 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" podStartSLOduration=126.976041156 podStartE2EDuration="2m6.976041156s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:46.974120631 +0000 UTC m=+147.764837779" watchObservedRunningTime="2025-11-26 15:28:46.976041156 +0000 UTC m=+147.766758304" Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.982263 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" event={"ID":"3d1c114a-859f-4dd2-8bd5-79f55b713703","Type":"ContainerStarted","Data":"cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.983457 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.993212 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" event={"ID":"4e14de8c-da99-4612-92ec-50f74d50c547","Type":"ContainerStarted","Data":"65eb0407b818134aa90653ab6ac0d5fd15434ae0cd9b30a11ef133dcbe2e29de"} Nov 26 15:28:46 crc kubenswrapper[5010]: I1126 15:28:46.997653 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-l97vm" podStartSLOduration=126.997633296 podStartE2EDuration="2m6.997633296s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:46.997350648 +0000 UTC m=+147.788067796" watchObservedRunningTime="2025-11-26 15:28:46.997633296 +0000 UTC m=+147.788350434" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.000060 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" event={"ID":"ed362947-89a1-4af0-843c-fde4fd5b61ec","Type":"ContainerStarted","Data":"e87a4177ceefd85e6075be95345c15c39c7fcd5d3bcf056d062603d549bbe018"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.010487 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" event={"ID":"adb7407f-43e9-4089-8e7d-64a390f510af","Type":"ContainerStarted","Data":"37fb6156a4141d12f65036dee3e0f089cf37b56ace80f87cd8992efaaf48830d"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.010936 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" 
event={"ID":"adb7407f-43e9-4089-8e7d-64a390f510af","Type":"ContainerStarted","Data":"c88101757f8a2b58c781845d7f2ed097163b32a12722822949d6d0c4f54abd73"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.026669 5010 generic.go:334] "Generic (PLEG): container finished" podID="c18f86a9-0cef-41d3-a371-dfcbb46f837f" containerID="452700cc0b2c6bca4c02bd6a6212c80790c84f204ea04214707df03bede7ab81" exitCode=0 Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.027092 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" event={"ID":"c18f86a9-0cef-41d3-a371-dfcbb46f837f","Type":"ContainerDied","Data":"452700cc0b2c6bca4c02bd6a6212c80790c84f204ea04214707df03bede7ab81"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.032683 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" event={"ID":"0a0e77df-c904-48f2-a303-0c024c1fd066","Type":"ContainerStarted","Data":"3acbce433858b3157dd74c928221b3a46e1f0b8e774ab71573951ce81eb598ee"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.041393 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.041891 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.541868326 +0000 UTC m=+148.332585474 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.042820 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-dmrdb" podStartSLOduration=127.042797472 podStartE2EDuration="2m7.042797472s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.042613407 +0000 UTC m=+147.833330555" watchObservedRunningTime="2025-11-26 15:28:47.042797472 +0000 UTC m=+147.833514620" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.042960 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" event={"ID":"2a8c8951-a5bf-45bc-ae21-bea34aee0143","Type":"ContainerStarted","Data":"64f2093794e07f1466f03e280c8f4b2e2a8f11018b1372626c53ea8170a2c240"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.046128 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" event={"ID":"2a8c8951-a5bf-45bc-ae21-bea34aee0143","Type":"ContainerStarted","Data":"c42aeffb4c2771c73de0dd24c912e96caf5fb9e75bbe3e24c716a905527b20d0"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.058326 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-x4zhd" event={"ID":"7d5bba06-909e-4d1c-b379-53015d91e3fd","Type":"ContainerStarted","Data":"33e65f616d4a44d3089a0ff2224036180135d8be05955029ab2deb528acc5de8"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.081131 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" event={"ID":"db957cb2-16f7-4282-9209-a3228efb3c20","Type":"ContainerStarted","Data":"347a7b85551b0e76117eda2a30a5a609a2f8fc8c84e752bed07d8f6b58c1933a"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.091161 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" event={"ID":"f09f1157-dffe-47b5-8241-083a8b5ed7a9","Type":"ContainerStarted","Data":"9637000d070992f24c07c3ab87d0c1a7dd2a0aec380297ee1ec297c5e3184e1f"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.092596 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.094610 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" event={"ID":"69ee19f7-5399-4651-87c8-722ccfbb7e74","Type":"ContainerStarted","Data":"1e049d36e3a9be51381f4cf643500ef49afc8ada323496c913b3b055c4fde552"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.106239 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-xzsf5" podStartSLOduration=127.106209815 podStartE2EDuration="2m7.106209815s" 
podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.100636767 +0000 UTC m=+147.891353925" watchObservedRunningTime="2025-11-26 15:28:47.106209815 +0000 UTC m=+147.896926963" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.108059 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" event={"ID":"d8454588-9ece-42d7-a263-74f7026f4ebe","Type":"ContainerStarted","Data":"aa063a79472ada69ba96caaa595b870380cac1e45127054d34539b363794fd21"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.124192 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.128611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" event={"ID":"6feef64c-3e2b-4f00-a30c-e35cb976384d","Type":"ContainerStarted","Data":"7dbffef74f7c3f8f5a05d26e129d94796dabad8a81bb5e92c6f4cad4647dd0f8"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.128881 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" event={"ID":"6feef64c-3e2b-4f00-a30c-e35cb976384d","Type":"ContainerStarted","Data":"8a4fb2ad61a257cdda5eb659f6a285c130c0cc1b3560498ecfe2b9afa550e118"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.136577 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" podStartSLOduration=128.136552932 podStartE2EDuration="2m8.136552932s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.12799917 +0000 UTC m=+147.918716318" watchObservedRunningTime="2025-11-26 15:28:47.136552932 +0000 UTC m=+147.927270080" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.141139 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-mp6b2" event={"ID":"23a37a68-f3a2-451a-b7ab-714614d77140","Type":"ContainerStarted","Data":"3fe2b48cd6eed97012c8c3631c18f0f88b9c3051fbc4e5b5d5d4f7edf7e512ac"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.143065 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.148048 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.648031327 +0000 UTC m=+148.438748465 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.155653 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" event={"ID":"39cdf3f7-8c3f-4c63-8c8f-88819f6f6f13","Type":"ContainerStarted","Data":"0bd7d7cfe847bbde7160f827fb311776ffb63261a27558abfda8617f28731a5d"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.157028 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.186607 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" event={"ID":"68828445-ea1e-4df1-ba75-76f1179b5341","Type":"ContainerStarted","Data":"1b1e0a5a560c33e0f12449358a2cb9847e6e836d6394728f02020e8f389b6b43"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.186734 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.205493 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ptfqn" event={"ID":"90ac104e-9059-4bf4-8d44-0ce8ffb5c08a","Type":"ContainerStarted","Data":"69b18ebf88b9725af52cc48bd5fe3990f5273f0e4f50283a63a60397fc75aac4"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.212349 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" podStartSLOduration=127.212331544 podStartE2EDuration="2m7.212331544s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.209795862 +0000 UTC m=+148.000513030" watchObservedRunningTime="2025-11-26 15:28:47.212331544 +0000 UTC m=+148.003048692" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.215808 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" event={"ID":"a0024c1b-b48e-4609-b132-e7078313d8ae","Type":"ContainerStarted","Data":"9d19a84cc564d87048642154796953dcbcb4fb4449c28b44090d58a39b86cbeb"} Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.216157 5010 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-mr9qp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.23:8080/healthz\": dial tcp 10.217.0.23:8080: connect: connection refused" start-of-body= Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.216209 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.23:8080/healthz\": 
dial tcp 10.217.0.23:8080: connect: connection refused" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.229130 5010 patch_prober.go:28] interesting pod/downloads-7954f5f757-p6kxm container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.229200 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-p6kxm" podUID="22abed70-9135-4e67-a009-b013ada1f720" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.243358 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncsqq" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.244029 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.245749 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.745728118 +0000 UTC m=+148.536445276 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.246107 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-l7l7k" podStartSLOduration=127.246094618 podStartE2EDuration="2m7.246094618s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.243197656 +0000 UTC m=+148.033914824" watchObservedRunningTime="2025-11-26 15:28:47.246094618 +0000 UTC m=+148.036811766" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.282389 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-w5nrm" podStartSLOduration=127.282352303 podStartE2EDuration="2m7.282352303s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.277489485 +0000 UTC m=+148.068206853" watchObservedRunningTime="2025-11-26 15:28:47.282352303 +0000 UTC m=+148.073069451" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.285226 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.304043 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-r8jf7" podStartSLOduration=127.304017935 podStartE2EDuration="2m7.304017935s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.302083331 +0000 UTC m=+148.092800489" watchObservedRunningTime="2025-11-26 15:28:47.304017935 +0000 UTC m=+148.094735093" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.348293 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.349882 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.349922 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.352274 5010 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-6bsj2 container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.10:8443/livez\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.352391 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" podUID="0a0e77df-c904-48f2-a303-0c024c1fd066" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.10:8443/livez\": dial tcp 10.217.0.10:8443: connect: connection refused" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.374776 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.375266 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.875231408 +0000 UTC m=+148.665948596 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.401824 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:47 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:47 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:47 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.401904 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.414369 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" podStartSLOduration=127.414341593 podStartE2EDuration="2m7.414341593s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.372967044 +0000 UTC m=+148.163684192" watchObservedRunningTime="2025-11-26 15:28:47.414341593 +0000 UTC m=+148.205058771" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.414876 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-vrdz2" podStartSLOduration=127.414871708 podStartE2EDuration="2m7.414871708s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.412855541 +0000 UTC m=+148.203572689" watchObservedRunningTime="2025-11-26 15:28:47.414871708 +0000 UTC m=+148.205588856" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.435139 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-pq92r" podStartSLOduration=127.435119421 podStartE2EDuration="2m7.435119421s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.434331478 +0000 UTC m=+148.225048626" watchObservedRunningTime="2025-11-26 15:28:47.435119421 +0000 UTC m=+148.225836559" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.466396 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:47 crc 
kubenswrapper[5010]: E1126 15:28:47.466787 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:47.966768415 +0000 UTC m=+148.757485563 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.502119 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-fgc69" podStartSLOduration=127.502098614 podStartE2EDuration="2m7.502098614s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.49842776 +0000 UTC m=+148.289144908" watchObservedRunningTime="2025-11-26 15:28:47.502098614 +0000 UTC m=+148.292815752" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.575031 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.575438 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.075423686 +0000 UTC m=+148.866140834 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.592038 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5jr82" podStartSLOduration=127.592021415 podStartE2EDuration="2m7.592021415s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.531963428 +0000 UTC m=+148.322680586" watchObservedRunningTime="2025-11-26 15:28:47.592021415 +0000 UTC m=+148.382738563" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.592844 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-b74pk" podStartSLOduration=127.592838338 podStartE2EDuration="2m7.592838338s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.591265784 +0000 UTC m=+148.381982932" watchObservedRunningTime="2025-11-26 15:28:47.592838338 +0000 UTC m=+148.383555486" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.601143 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7jk7d"] Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.602247 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.608986 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.629261 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" podStartSLOduration=127.629242467 podStartE2EDuration="2m7.629242467s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.627282942 +0000 UTC m=+148.418000080" watchObservedRunningTime="2025-11-26 15:28:47.629242467 +0000 UTC m=+148.419959615" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.637111 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7jk7d"] Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.655656 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-x97hz" podStartSLOduration=127.655631973 podStartE2EDuration="2m7.655631973s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.654074029 +0000 UTC m=+148.444791177" watchObservedRunningTime="2025-11-26 15:28:47.655631973 +0000 UTC m=+148.446349121" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.675632 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.675988 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-catalog-content\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.676023 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-utilities\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.676060 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4npnx\" (UniqueName: \"kubernetes.io/projected/5ca95312-780d-4552-9833-1ef36dd5d15d-kube-api-access-4npnx\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.676151 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-26 15:28:48.176124492 +0000 UTC m=+148.966841640 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.779964 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-catalog-content\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.780295 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-utilities\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.780326 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.780353 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4npnx\" (UniqueName: \"kubernetes.io/projected/5ca95312-780d-4552-9833-1ef36dd5d15d-kube-api-access-4npnx\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.781188 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-catalog-content\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.781283 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-utilities\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.781512 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.28149604 +0000 UTC m=+149.072213188 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.799852 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-ptfqn" podStartSLOduration=127.799825468 podStartE2EDuration="2m7.799825468s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:47.770075138 +0000 UTC m=+148.560792286" watchObservedRunningTime="2025-11-26 15:28:47.799825468 +0000 UTC m=+148.590542616" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.817158 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hb82b"] Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.818631 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.830183 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.848921 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hb82b"] Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.858952 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4npnx\" (UniqueName: \"kubernetes.io/projected/5ca95312-780d-4552-9833-1ef36dd5d15d-kube-api-access-4npnx\") pod \"certified-operators-7jk7d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.881414 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.881514 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.381498277 +0000 UTC m=+149.172215425 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.881833 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.882136 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.382127074 +0000 UTC m=+149.172844212 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.933147 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.982736 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.983485 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmgfh\" (UniqueName: \"kubernetes.io/projected/b0730a77-df20-4d33-abd6-22de117337c3-kube-api-access-gmgfh\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.983628 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-utilities\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:47 crc kubenswrapper[5010]: I1126 15:28:47.983758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-catalog-content\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:47 crc kubenswrapper[5010]: E1126 15:28:47.984115 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.484077446 +0000 UTC m=+149.274794594 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.023241 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k8fjs"] Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.024294 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.045242 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k8fjs"] Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086458 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086506 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-catalog-content\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086551 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-utilities\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086577 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmgfh\" (UniqueName: \"kubernetes.io/projected/b0730a77-df20-4d33-abd6-22de117337c3-kube-api-access-gmgfh\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086595 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-utilities\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086622 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-catalog-content\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.086685 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9prrp\" (UniqueName: \"kubernetes.io/projected/e9847f64-a32c-494f-8a71-283b25184c19-kube-api-access-9prrp\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.086996 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.586983214 +0000 UTC m=+149.377700362 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.088170 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-utilities\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.088166 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-catalog-content\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.140159 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmgfh\" (UniqueName: \"kubernetes.io/projected/b0730a77-df20-4d33-abd6-22de117337c3-kube-api-access-gmgfh\") pod \"community-operators-hb82b\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.193967 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vk2zl"] Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.195403 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.195539 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.196030 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.695998005 +0000 UTC m=+149.486715153 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.196398 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9prrp\" (UniqueName: \"kubernetes.io/projected/e9847f64-a32c-494f-8a71-283b25184c19-kube-api-access-9prrp\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.196549 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.196671 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-catalog-content\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.196848 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-utilities\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.197536 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.697506998 +0000 UTC m=+149.488224136 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.198314 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-utilities\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.198804 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-catalog-content\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.230325 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.258621 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vk2zl"] Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.300907 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.301360 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-utilities\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.301429 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-czgjm\" (UniqueName: \"kubernetes.io/projected/79dbc879-38d9-4605-b382-01eec0def0ee-kube-api-access-czgjm\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.301502 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-catalog-content\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.301883 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-26 15:28:48.801860758 +0000 UTC m=+149.592577906 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.313076 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9prrp\" (UniqueName: \"kubernetes.io/projected/e9847f64-a32c-494f-8a71-283b25184c19-kube-api-access-9prrp\") pod \"certified-operators-k8fjs\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.358118 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" event={"ID":"c18f86a9-0cef-41d3-a371-dfcbb46f837f","Type":"ContainerStarted","Data":"29ef016ed431d23d14852054e026872fec3f56753d87e6c99ef7ed75cd770c32"} Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.382921 5010 generic.go:334] "Generic (PLEG): container finished" podID="446fb8a2-da33-4281-a0bf-98d3450a22e7" containerID="05ed2c83ae3c218944dc1a4d86cde50a307f28e77065ce746efe55da7554c619" exitCode=0 Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.383819 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" event={"ID":"446fb8a2-da33-4281-a0bf-98d3450a22e7","Type":"ContainerDied","Data":"05ed2c83ae3c218944dc1a4d86cde50a307f28e77065ce746efe55da7554c619"} Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.396544 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.389863 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:48 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:48 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:48 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.397632 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.414024 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-catalog-content\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.414205 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-utilities\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.414239 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.414331 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-czgjm\" (UniqueName: \"kubernetes.io/projected/79dbc879-38d9-4605-b382-01eec0def0ee-kube-api-access-czgjm\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.414986 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:48.914973224 +0000 UTC m=+149.705690362 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.415242 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-utilities\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.415502 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-catalog-content\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.463986 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" event={"ID":"5a3ecd68-6560-460d-b6ac-53209faecbd8","Type":"ContainerStarted","Data":"de2e45ceb42c664ad1c71395aface4abd2305fbd29537eb25ab837803cb1a583"} Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.479090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-czgjm\" (UniqueName: \"kubernetes.io/projected/79dbc879-38d9-4605-b382-01eec0def0ee-kube-api-access-czgjm\") pod \"community-operators-vk2zl\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.503212 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-x4zhd" event={"ID":"7d5bba06-909e-4d1c-b379-53015d91e3fd","Type":"ContainerStarted","Data":"8f6fcdf1d2528086376ed5add35804cbbae63b4924cd248c361ec81280ea4079"} Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.515180 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.515764 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.515882 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.522081 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.02203724 +0000 UTC m=+149.812754388 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.526044 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.532191 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.546620 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-x4zhd" podStartSLOduration=8.546593354 podStartE2EDuration="8.546593354s" podCreationTimestamp="2025-11-26 15:28:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:48.535772219 +0000 UTC m=+149.326489397" watchObservedRunningTime="2025-11-26 15:28:48.546593354 +0000 UTC m=+149.337310502" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.632468 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.643797 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.143775961 +0000 UTC m=+149.934493109 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.740481 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.741037 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.741094 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.741119 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.742340 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.242321306 +0000 UTC m=+150.033038454 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.742626 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.748366 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.753937 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.754833 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.764988 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7jk7d"] Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.790971 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.843692 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.843767 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.844089 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 15:28:49.344073292 +0000 UTC m=+150.134790440 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.879635 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.945585 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.945770 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.445741846 +0000 UTC m=+150.236458994 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:48 crc kubenswrapper[5010]: I1126 15:28:48.946127 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:48 crc kubenswrapper[5010]: E1126 15:28:48.946505 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.446497247 +0000 UTC m=+150.237214395 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.031632 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hb82b"] Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.048029 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.048482 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.548460589 +0000 UTC m=+150.339177727 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.063060 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.155844 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.156930 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.656916574 +0000 UTC m=+150.447633712 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.184361 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-qhhpw" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.268050 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.268186 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.768161837 +0000 UTC m=+150.558878985 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.268432 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.268766 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.768757804 +0000 UTC m=+150.559474952 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.371540 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.372359 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.872342782 +0000 UTC m=+150.663059930 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.385018 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:49 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:49 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:49 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.385073 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.473558 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.473869 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:49.973857081 +0000 UTC m=+150.764574229 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.505493 5010 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-9xndz container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.505536 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" podUID="8c454fe1-8825-4c5f-a145-727f16df4b00" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.8:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.547346 5010 generic.go:334] "Generic (PLEG): container finished" podID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerID="eec43d6ee694e5ec0c4fa04eae693d1ecee499683bf6a8d2a7d6608202fa87ca" exitCode=0 Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.547419 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7jk7d" event={"ID":"5ca95312-780d-4552-9833-1ef36dd5d15d","Type":"ContainerDied","Data":"eec43d6ee694e5ec0c4fa04eae693d1ecee499683bf6a8d2a7d6608202fa87ca"} Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.547447 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7jk7d" event={"ID":"5ca95312-780d-4552-9833-1ef36dd5d15d","Type":"ContainerStarted","Data":"1a22c9f34f8766bd0331924f7124cbfd5431028f19408e01b820671be9503998"} Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.550453 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.576673 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.577346 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:50.077326285 +0000 UTC m=+150.868043443 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.579039 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-9xndz" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.588932 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" event={"ID":"5a3ecd68-6560-460d-b6ac-53209faecbd8","Type":"ContainerStarted","Data":"c7f0e11555f375addabff3e6797619bda1a1e948aabe5c9dcc96bccdae0d4d15"} Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.607108 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kfxxn"] Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.636086 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.636528 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" event={"ID":"c18f86a9-0cef-41d3-a371-dfcbb46f837f","Type":"ContainerStarted","Data":"707bd9a9e2693b7c1a89702c7e1808b861680d3ce15f8bdf02ffda200e420153"} Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.691725 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.705201 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.707111 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:50.207089733 +0000 UTC m=+150.997806881 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.717126 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfxxn"] Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.747112 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerStarted","Data":"0a03f8e117a1a99eb1be0f45b46f39932a067742af98f94a34ebaaf5d3a9b873"} Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.760273 5010 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.809220 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.809478 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-utilities\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.809598 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-catalog-content\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.809623 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlf58\" (UniqueName: \"kubernetes.io/projected/a358d6b9-52e1-4088-9141-44059aa6e3af-kube-api-access-wlf58\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.809751 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:50.309735724 +0000 UTC m=+151.100452862 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.906207 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" podStartSLOduration=130.90619209 podStartE2EDuration="2m10.90619209s" podCreationTimestamp="2025-11-26 15:26:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:49.829330358 +0000 UTC m=+150.620047506" watchObservedRunningTime="2025-11-26 15:28:49.90619209 +0000 UTC m=+150.696909228" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.913142 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-utilities\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.913560 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-catalog-content\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.913726 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlf58\" (UniqueName: \"kubernetes.io/projected/a358d6b9-52e1-4088-9141-44059aa6e3af-kube-api-access-wlf58\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.913910 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.914979 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-utilities\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.917522 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-catalog-content\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:49 crc kubenswrapper[5010]: E1126 15:28:49.918039 5010 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 15:28:50.418023935 +0000 UTC m=+151.208741073 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-7hblv" (UID: "9986e410-984a-466f-bb26-b1644bc6c976") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:49 crc kubenswrapper[5010]: I1126 15:28:49.942581 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k8fjs"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.017109 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:50 crc kubenswrapper[5010]: E1126 15:28:50.017516 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 15:28:50.517498866 +0000 UTC m=+151.308216014 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.028174 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vk2zl"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.030816 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bdg87"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.032969 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.037216 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlf58\" (UniqueName: \"kubernetes.io/projected/a358d6b9-52e1-4088-9141-44059aa6e3af-kube-api-access-wlf58\") pod \"redhat-marketplace-kfxxn\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.045921 5010 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T15:28:49.760302207Z","Handler":null,"Name":""} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.100995 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdg87"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.106972 5010 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.107006 5010 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.118246 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjnrs\" (UniqueName: \"kubernetes.io/projected/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-kube-api-access-jjnrs\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.118298 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.118325 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-catalog-content\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.118362 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-utilities\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: W1126 15:28:50.133348 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-2c178c30ccaba68320b17a0e8f13a2752e9aae21bd61ac7a0f8c961309d2677f WatchSource:0}: Error finding container 2c178c30ccaba68320b17a0e8f13a2752e9aae21bd61ac7a0f8c961309d2677f: Status 
404 returned error can't find the container with id 2c178c30ccaba68320b17a0e8f13a2752e9aae21bd61ac7a0f8c961309d2677f Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.166966 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.167039 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.219912 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-utilities\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.219994 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjnrs\" (UniqueName: \"kubernetes.io/projected/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-kube-api-access-jjnrs\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.220034 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-catalog-content\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.220436 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-utilities\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.220519 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-catalog-content\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.260436 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjnrs\" (UniqueName: \"kubernetes.io/projected/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-kube-api-access-jjnrs\") pod \"redhat-marketplace-bdg87\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.321003 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.323901 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.375885 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:50 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:50 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:50 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.376346 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.384363 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-7hblv\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.422250 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 15:28:50 crc kubenswrapper[5010]: E1126 15:28:50.422556 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="446fb8a2-da33-4281-a0bf-98d3450a22e7" containerName="collect-profiles" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.422578 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="446fb8a2-da33-4281-a0bf-98d3450a22e7" containerName="collect-profiles" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.423008 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.423094 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9qls\" (UniqueName: \"kubernetes.io/projected/446fb8a2-da33-4281-a0bf-98d3450a22e7-kube-api-access-d9qls\") pod \"446fb8a2-da33-4281-a0bf-98d3450a22e7\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.423146 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/446fb8a2-da33-4281-a0bf-98d3450a22e7-config-volume\") pod \"446fb8a2-da33-4281-a0bf-98d3450a22e7\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.423192 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/446fb8a2-da33-4281-a0bf-98d3450a22e7-secret-volume\") pod 
\"446fb8a2-da33-4281-a0bf-98d3450a22e7\" (UID: \"446fb8a2-da33-4281-a0bf-98d3450a22e7\") " Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.427460 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/446fb8a2-da33-4281-a0bf-98d3450a22e7-config-volume" (OuterVolumeSpecName: "config-volume") pod "446fb8a2-da33-4281-a0bf-98d3450a22e7" (UID: "446fb8a2-da33-4281-a0bf-98d3450a22e7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.427875 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.429111 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/446fb8a2-da33-4281-a0bf-98d3450a22e7-kube-api-access-d9qls" (OuterVolumeSpecName: "kube-api-access-d9qls") pod "446fb8a2-da33-4281-a0bf-98d3450a22e7" (UID: "446fb8a2-da33-4281-a0bf-98d3450a22e7"). InnerVolumeSpecName "kube-api-access-d9qls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.430130 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/446fb8a2-da33-4281-a0bf-98d3450a22e7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "446fb8a2-da33-4281-a0bf-98d3450a22e7" (UID: "446fb8a2-da33-4281-a0bf-98d3450a22e7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.433266 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="446fb8a2-da33-4281-a0bf-98d3450a22e7" containerName="collect-profiles" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.434002 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.434175 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.438065 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.438321 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.462057 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.497017 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.526353 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/155bc56e-ae99-43b4-86cf-83800e3cff58-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.526462 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/155bc56e-ae99-43b4-86cf-83800e3cff58-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.526506 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9qls\" (UniqueName: \"kubernetes.io/projected/446fb8a2-da33-4281-a0bf-98d3450a22e7-kube-api-access-d9qls\") on node \"crc\" DevicePath \"\"" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.526516 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/446fb8a2-da33-4281-a0bf-98d3450a22e7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.526526 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/446fb8a2-da33-4281-a0bf-98d3450a22e7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.631464 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/155bc56e-ae99-43b4-86cf-83800e3cff58-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.632006 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/155bc56e-ae99-43b4-86cf-83800e3cff58-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.633531 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/155bc56e-ae99-43b4-86cf-83800e3cff58-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.658141 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/155bc56e-ae99-43b4-86cf-83800e3cff58-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.750961 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfxxn"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.772081 5010 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-t88lc"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.773184 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:50 crc kubenswrapper[5010]: W1126 15:28:50.775501 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda358d6b9_52e1_4088_9141_44059aa6e3af.slice/crio-2c632f0c0d577a5b0db64ab0e842be8184795ba69f36b1c7fe149c9b50f5bff9 WatchSource:0}: Error finding container 2c632f0c0d577a5b0db64ab0e842be8184795ba69f36b1c7fe149c9b50f5bff9: Status 404 returned error can't find the container with id 2c632f0c0d577a5b0db64ab0e842be8184795ba69f36b1c7fe149c9b50f5bff9 Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.775759 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.798689 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t88lc"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.800139 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ea5b6b2dc915c1cc14492b4df0dce08d268d4c45638ee56543fcd3fd47ad142b"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.800177 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"2c178c30ccaba68320b17a0e8f13a2752e9aae21bd61ac7a0f8c961309d2677f"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.801628 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.812079 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"3ffbc8a0ec0275416a3365d6cea5e03a508648afd17ade91e968e2ef5b46d1f0"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.812478 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"c0a9f0c10892abd94639248ad56d4ec680cf6285d091388d9f2e632271d63950"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.828639 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"2acd1ddece050453eaeca2725e013eb8a331e44aa79270f3c4de4295e67d6ed3"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.828685 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"dba643d56376d3695a3706483362ca6573616607e6982ef3be5c5fe85730081c"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.831429 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="b0730a77-df20-4d33-abd6-22de117337c3" containerID="c7c7c348dfa4fdb35cdc0a3f012d9ec5bc5b45f1d41e5d5dbe7e8ed473cb7df7" exitCode=0 Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.831476 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerDied","Data":"c7c7c348dfa4fdb35cdc0a3f012d9ec5bc5b45f1d41e5d5dbe7e8ed473cb7df7"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.840450 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.840522 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6" event={"ID":"446fb8a2-da33-4281-a0bf-98d3450a22e7","Type":"ContainerDied","Data":"fcd23aed1654bd290f517b5300f3ee91f19bcb0b6bd292512d3f728528a2e39a"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.840587 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fcd23aed1654bd290f517b5300f3ee91f19bcb0b6bd292512d3f728528a2e39a" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.847176 5010 generic.go:334] "Generic (PLEG): container finished" podID="e9847f64-a32c-494f-8a71-283b25184c19" containerID="4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480" exitCode=0 Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.847463 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8fjs" event={"ID":"e9847f64-a32c-494f-8a71-283b25184c19","Type":"ContainerDied","Data":"4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.847496 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8fjs" event={"ID":"e9847f64-a32c-494f-8a71-283b25184c19","Type":"ContainerStarted","Data":"e8644509a19cf01418717f0e0cfdd7ca45a0a8956f810a5394aeb2095cc70f0b"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.852326 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.893162 5010 generic.go:334] "Generic (PLEG): container finished" podID="79dbc879-38d9-4605-b382-01eec0def0ee" containerID="c56b40795f323c0b41866e09fba43a86c5b48c74b226f75aa85fd6bb560ea51e" exitCode=0 Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.893250 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerDied","Data":"c56b40795f323c0b41866e09fba43a86c5b48c74b226f75aa85fd6bb560ea51e"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.893302 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerStarted","Data":"7c8119c41edec4968aad13037f27553bf70b48fc7a78f69effe16e187032f5b6"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.897977 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdg87"] Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.901973 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" event={"ID":"5a3ecd68-6560-460d-b6ac-53209faecbd8","Type":"ContainerStarted","Data":"b2856cfd8260c1d3d26c97ada620659eae00125819220341693f9f0c7baa08c6"} Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.938647 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-utilities\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.938694 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77n5g\" (UniqueName: \"kubernetes.io/projected/91353bbb-798f-47cc-96b2-0dfeee2938f0-kube-api-access-77n5g\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:50 crc kubenswrapper[5010]: I1126 15:28:50.938746 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-catalog-content\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.040383 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-utilities\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.040488 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77n5g\" (UniqueName: \"kubernetes.io/projected/91353bbb-798f-47cc-96b2-0dfeee2938f0-kube-api-access-77n5g\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 
15:28:51.040518 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-catalog-content\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.041095 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-catalog-content\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.052120 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-utilities\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.068636 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7hblv"] Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.098330 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-tbs79" podStartSLOduration=11.098297193 podStartE2EDuration="11.098297193s" podCreationTimestamp="2025-11-26 15:28:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:51.096038229 +0000 UTC m=+151.886755387" watchObservedRunningTime="2025-11-26 15:28:51.098297193 +0000 UTC m=+151.889014341" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.130871 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77n5g\" (UniqueName: \"kubernetes.io/projected/91353bbb-798f-47cc-96b2-0dfeee2938f0-kube-api-access-77n5g\") pod \"redhat-operators-t88lc\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.184513 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qct4n"] Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.185941 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.195573 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qct4n"] Nov 26 15:28:51 crc kubenswrapper[5010]: E1126 15:28:51.223386 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda358d6b9_52e1_4088_9141_44059aa6e3af.slice/crio-1d7b1afb522b768f7cf5f164b80a53bc6b6d6f521565b82f3600c0dde0f25adb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda358d6b9_52e1_4088_9141_44059aa6e3af.slice/crio-conmon-1d7b1afb522b768f7cf5f164b80a53bc6b6d6f521565b82f3600c0dde0f25adb.scope\": RecentStats: unable to find data in memory cache]" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.244888 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-catalog-content\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.244944 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp68r\" (UniqueName: \"kubernetes.io/projected/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-kube-api-access-fp68r\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.245007 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-utilities\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.346657 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp68r\" (UniqueName: \"kubernetes.io/projected/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-kube-api-access-fp68r\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.347175 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-utilities\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.347205 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-catalog-content\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.348325 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-catalog-content\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.348910 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-utilities\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.361166 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.371296 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:51 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:51 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:51 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.371362 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.371548 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp68r\" (UniqueName: \"kubernetes.io/projected/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-kube-api-access-fp68r\") pod \"redhat-operators-qct4n\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.415441 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.536694 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.772310 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-t88lc"] Nov 26 15:28:51 crc kubenswrapper[5010]: W1126 15:28:51.831847 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91353bbb_798f_47cc_96b2_0dfeee2938f0.slice/crio-f479abaafe10b7d57ee684a9ad2332d02b68474fc059294b931e9fae3b5d410d WatchSource:0}: Error finding container f479abaafe10b7d57ee684a9ad2332d02b68474fc059294b931e9fae3b5d410d: Status 404 returned error can't find the container with id f479abaafe10b7d57ee684a9ad2332d02b68474fc059294b931e9fae3b5d410d Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.847169 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qct4n"] Nov 26 15:28:51 crc kubenswrapper[5010]: W1126 15:28:51.854761 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3db4017e_aa19_4e80_a2bc_d1a682fe2e98.slice/crio-7d45406a8394ab1dbc9504e5cd57150623062fc2c7f575210b8ec185449c3673 WatchSource:0}: Error finding container 7d45406a8394ab1dbc9504e5cd57150623062fc2c7f575210b8ec185449c3673: Status 404 returned error can't find the container with id 7d45406a8394ab1dbc9504e5cd57150623062fc2c7f575210b8ec185449c3673 Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.924403 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.933529 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" event={"ID":"9986e410-984a-466f-bb26-b1644bc6c976","Type":"ContainerStarted","Data":"629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.933591 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" event={"ID":"9986e410-984a-466f-bb26-b1644bc6c976","Type":"ContainerStarted","Data":"f3877539c886cf0ac476a94581889aea99892c90c79e412a27ad415e4f9e65b1"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.933766 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.937789 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"155bc56e-ae99-43b4-86cf-83800e3cff58","Type":"ContainerStarted","Data":"99501bd59f6695e6733f1d365fe12bbd47c1639d09463bba0916a658f02948f8"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.937833 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"155bc56e-ae99-43b4-86cf-83800e3cff58","Type":"ContainerStarted","Data":"dfa232a106d0d69567baf014edfeb8edd4e78fc0932b7ca0b87438ab63154e7e"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.941746 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t88lc" 
event={"ID":"91353bbb-798f-47cc-96b2-0dfeee2938f0","Type":"ContainerStarted","Data":"f479abaafe10b7d57ee684a9ad2332d02b68474fc059294b931e9fae3b5d410d"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.945164 5010 generic.go:334] "Generic (PLEG): container finished" podID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerID="77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28" exitCode=0 Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.945249 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdg87" event={"ID":"3ab9d996-a6a7-4204-abc2-0ec28f6bc569","Type":"ContainerDied","Data":"77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.945279 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdg87" event={"ID":"3ab9d996-a6a7-4204-abc2-0ec28f6bc569","Type":"ContainerStarted","Data":"77e9b137e9c2b630b261c54fe075682e7f69f07cffa35382e18aef721d86ff74"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.956291 5010 generic.go:334] "Generic (PLEG): container finished" podID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerID="1d7b1afb522b768f7cf5f164b80a53bc6b6d6f521565b82f3600c0dde0f25adb" exitCode=0 Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.956369 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfxxn" event={"ID":"a358d6b9-52e1-4088-9141-44059aa6e3af","Type":"ContainerDied","Data":"1d7b1afb522b768f7cf5f164b80a53bc6b6d6f521565b82f3600c0dde0f25adb"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.956394 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfxxn" event={"ID":"a358d6b9-52e1-4088-9141-44059aa6e3af","Type":"ContainerStarted","Data":"2c632f0c0d577a5b0db64ab0e842be8184795ba69f36b1c7fe149c9b50f5bff9"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.959228 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qct4n" event={"ID":"3db4017e-aa19-4e80-a2bc-d1a682fe2e98","Type":"ContainerStarted","Data":"7d45406a8394ab1dbc9504e5cd57150623062fc2c7f575210b8ec185449c3673"} Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.965428 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" podStartSLOduration=131.96539834 podStartE2EDuration="2m11.96539834s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:51.96116562 +0000 UTC m=+152.751882768" watchObservedRunningTime="2025-11-26 15:28:51.96539834 +0000 UTC m=+152.756115488" Nov 26 15:28:51 crc kubenswrapper[5010]: I1126 15:28:51.982127 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=1.982102572 podStartE2EDuration="1.982102572s" podCreationTimestamp="2025-11-26 15:28:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:28:51.978363176 +0000 UTC m=+152.769080344" watchObservedRunningTime="2025-11-26 15:28:51.982102572 +0000 UTC m=+152.772819720" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.356294 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.359403 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.360433 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.363204 5010 patch_prober.go:28] interesting pod/console-f9d7485db-rh2vd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.363279 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-rh2vd" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.368624 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-6bsj2" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.382389 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:52 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:52 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:52 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.382935 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.715085 5010 patch_prober.go:28] interesting pod/downloads-7954f5f757-p6kxm container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.715594 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-p6kxm" podUID="22abed70-9135-4e67-a009-b013ada1f720" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.715169 5010 patch_prober.go:28] interesting pod/downloads-7954f5f757-p6kxm container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.715728 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-p6kxm" podUID="22abed70-9135-4e67-a009-b013ada1f720" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: 
connection refused" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.832418 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.832488 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.847537 5010 patch_prober.go:28] interesting pod/apiserver-76f77b778f-djqn5 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]log ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]etcd ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/generic-apiserver-start-informers ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/max-in-flight-filter ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 26 15:28:52 crc kubenswrapper[5010]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/project.openshift.io-projectcache ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/openshift.io-startinformers ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 26 15:28:52 crc kubenswrapper[5010]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 26 15:28:52 crc kubenswrapper[5010]: livez check failed Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.847614 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" podUID="c18f86a9-0cef-41d3-a371-dfcbb46f837f" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.995662 5010 generic.go:334] "Generic (PLEG): container finished" podID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerID="9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e" exitCode=0 Nov 26 15:28:52 crc kubenswrapper[5010]: I1126 15:28:52.995843 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qct4n" event={"ID":"3db4017e-aa19-4e80-a2bc-d1a682fe2e98","Type":"ContainerDied","Data":"9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e"} Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.009432 5010 generic.go:334] "Generic (PLEG): container finished" podID="155bc56e-ae99-43b4-86cf-83800e3cff58" containerID="99501bd59f6695e6733f1d365fe12bbd47c1639d09463bba0916a658f02948f8" exitCode=0 Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.009513 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"155bc56e-ae99-43b4-86cf-83800e3cff58","Type":"ContainerDied","Data":"99501bd59f6695e6733f1d365fe12bbd47c1639d09463bba0916a658f02948f8"} Nov 26 15:28:53 crc 
kubenswrapper[5010]: I1126 15:28:53.016037 5010 generic.go:334] "Generic (PLEG): container finished" podID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerID="0d698fadb22a53d7e7b37408f09489ca97e77dabcc1a37c540ad81c66a9d777e" exitCode=0 Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.016245 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t88lc" event={"ID":"91353bbb-798f-47cc-96b2-0dfeee2938f0","Type":"ContainerDied","Data":"0d698fadb22a53d7e7b37408f09489ca97e77dabcc1a37c540ad81c66a9d777e"} Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.368374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.371598 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:53 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:53 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:53 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.371667 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.928498 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.929384 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.935626 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.937437 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 15:28:53 crc kubenswrapper[5010]: I1126 15:28:53.937578 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.031829 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/26bc380f-7be2-473d-98b2-9acb6b8309c6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.031886 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26bc380f-7be2-473d-98b2-9acb6b8309c6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.134498 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/26bc380f-7be2-473d-98b2-9acb6b8309c6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.134551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26bc380f-7be2-473d-98b2-9acb6b8309c6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.134676 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/26bc380f-7be2-473d-98b2-9acb6b8309c6-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.163180 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26bc380f-7be2-473d-98b2-9acb6b8309c6-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.250827 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.336879 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.369681 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:54 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:54 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:54 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.369800 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.438640 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/155bc56e-ae99-43b4-86cf-83800e3cff58-kubelet-dir\") pod \"155bc56e-ae99-43b4-86cf-83800e3cff58\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.438835 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/155bc56e-ae99-43b4-86cf-83800e3cff58-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "155bc56e-ae99-43b4-86cf-83800e3cff58" (UID: "155bc56e-ae99-43b4-86cf-83800e3cff58"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.438907 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/155bc56e-ae99-43b4-86cf-83800e3cff58-kube-api-access\") pod \"155bc56e-ae99-43b4-86cf-83800e3cff58\" (UID: \"155bc56e-ae99-43b4-86cf-83800e3cff58\") " Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.439282 5010 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/155bc56e-ae99-43b4-86cf-83800e3cff58-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.449792 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/155bc56e-ae99-43b4-86cf-83800e3cff58-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "155bc56e-ae99-43b4-86cf-83800e3cff58" (UID: "155bc56e-ae99-43b4-86cf-83800e3cff58"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.541777 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/155bc56e-ae99-43b4-86cf-83800e3cff58-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:28:54 crc kubenswrapper[5010]: I1126 15:28:54.883749 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 15:28:54 crc kubenswrapper[5010]: W1126 15:28:54.927788 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod26bc380f_7be2_473d_98b2_9acb6b8309c6.slice/crio-5b8fc202ca2602d486de66a4e2d3a17dcc48b7f98494c66628adc44474285f77 WatchSource:0}: Error finding container 5b8fc202ca2602d486de66a4e2d3a17dcc48b7f98494c66628adc44474285f77: Status 404 returned error can't find the container with id 5b8fc202ca2602d486de66a4e2d3a17dcc48b7f98494c66628adc44474285f77 Nov 26 15:28:55 crc kubenswrapper[5010]: I1126 15:28:55.121756 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"26bc380f-7be2-473d-98b2-9acb6b8309c6","Type":"ContainerStarted","Data":"5b8fc202ca2602d486de66a4e2d3a17dcc48b7f98494c66628adc44474285f77"} Nov 26 15:28:55 crc kubenswrapper[5010]: I1126 15:28:55.142144 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"155bc56e-ae99-43b4-86cf-83800e3cff58","Type":"ContainerDied","Data":"dfa232a106d0d69567baf014edfeb8edd4e78fc0932b7ca0b87438ab63154e7e"} Nov 26 15:28:55 crc kubenswrapper[5010]: I1126 15:28:55.142199 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dfa232a106d0d69567baf014edfeb8edd4e78fc0932b7ca0b87438ab63154e7e" Nov 26 15:28:55 crc kubenswrapper[5010]: I1126 15:28:55.142292 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 15:28:55 crc kubenswrapper[5010]: I1126 15:28:55.370473 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:55 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:55 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:55 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:55 crc kubenswrapper[5010]: I1126 15:28:55.370544 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:56 crc kubenswrapper[5010]: I1126 15:28:56.370170 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:56 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:56 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:56 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:56 crc kubenswrapper[5010]: I1126 15:28:56.370751 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:57 crc kubenswrapper[5010]: I1126 15:28:57.132958 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:28:57 crc kubenswrapper[5010]: I1126 15:28:57.162663 5010 generic.go:334] "Generic (PLEG): container finished" podID="26bc380f-7be2-473d-98b2-9acb6b8309c6" containerID="60b8ff6955f1e9cbb57cf4ff4a405d311393be1e75916b78e277ad6deabd7880" exitCode=0 Nov 26 15:28:57 crc kubenswrapper[5010]: I1126 15:28:57.162777 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"26bc380f-7be2-473d-98b2-9acb6b8309c6","Type":"ContainerDied","Data":"60b8ff6955f1e9cbb57cf4ff4a405d311393be1e75916b78e277ad6deabd7880"} Nov 26 15:28:57 crc kubenswrapper[5010]: I1126 15:28:57.373922 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:57 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:57 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:57 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:57 crc kubenswrapper[5010]: I1126 15:28:57.374270 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:57 crc kubenswrapper[5010]: I1126 15:28:57.837316 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:57 crc 
kubenswrapper[5010]: I1126 15:28:57.843301 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-djqn5" Nov 26 15:28:58 crc kubenswrapper[5010]: I1126 15:28:58.247525 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-x4zhd" Nov 26 15:28:58 crc kubenswrapper[5010]: I1126 15:28:58.373070 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:58 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:58 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:58 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:58 crc kubenswrapper[5010]: I1126 15:28:58.373128 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:28:59 crc kubenswrapper[5010]: I1126 15:28:59.369877 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:28:59 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:28:59 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:28:59 crc kubenswrapper[5010]: healthz check failed Nov 26 15:28:59 crc kubenswrapper[5010]: I1126 15:28:59.370161 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:00 crc kubenswrapper[5010]: I1126 15:29:00.368929 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:00 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:00 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:00 crc kubenswrapper[5010]: healthz check failed Nov 26 15:29:00 crc kubenswrapper[5010]: I1126 15:29:00.369000 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:01 crc kubenswrapper[5010]: I1126 15:29:01.369610 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:01 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:01 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:01 crc kubenswrapper[5010]: healthz check failed Nov 26 15:29:01 crc kubenswrapper[5010]: I1126 15:29:01.369665 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" 
podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.359629 5010 patch_prober.go:28] interesting pod/console-f9d7485db-rh2vd container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body= Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.360070 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-rh2vd" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerName="console" probeResult="failure" output="Get \"https://10.217.0.11:8443/health\": dial tcp 10.217.0.11:8443: connect: connection refused" Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.368977 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:02 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:02 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:02 crc kubenswrapper[5010]: healthz check failed Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.369054 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.714838 5010 patch_prober.go:28] interesting pod/downloads-7954f5f757-p6kxm container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.714936 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-p6kxm" podUID="22abed70-9135-4e67-a009-b013ada1f720" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.715017 5010 patch_prober.go:28] interesting pod/downloads-7954f5f757-p6kxm container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" start-of-body= Nov 26 15:29:02 crc kubenswrapper[5010]: I1126 15:29:02.715083 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-p6kxm" podUID="22abed70-9135-4e67-a009-b013ada1f720" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.13:8080/\": dial tcp 10.217.0.13:8080: connect: connection refused" Nov 26 15:29:03 crc kubenswrapper[5010]: I1126 15:29:03.369381 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:03 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:03 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:03 crc kubenswrapper[5010]: 
healthz check failed Nov 26 15:29:03 crc kubenswrapper[5010]: I1126 15:29:03.369505 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:03 crc kubenswrapper[5010]: I1126 15:29:03.828944 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:29:03 crc kubenswrapper[5010]: I1126 15:29:03.841474 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fd9f5a65-e633-439f-8e8d-b760d20a3223-metrics-certs\") pod \"network-metrics-daemon-df2ll\" (UID: \"fd9f5a65-e633-439f-8e8d-b760d20a3223\") " pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:29:04 crc kubenswrapper[5010]: I1126 15:29:04.080276 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-df2ll" Nov 26 15:29:04 crc kubenswrapper[5010]: I1126 15:29:04.371038 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:04 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:04 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:04 crc kubenswrapper[5010]: healthz check failed Nov 26 15:29:04 crc kubenswrapper[5010]: I1126 15:29:04.371118 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:04 crc kubenswrapper[5010]: I1126 15:29:04.950768 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.048863 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/26bc380f-7be2-473d-98b2-9acb6b8309c6-kubelet-dir\") pod \"26bc380f-7be2-473d-98b2-9acb6b8309c6\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.049256 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/26bc380f-7be2-473d-98b2-9acb6b8309c6-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "26bc380f-7be2-473d-98b2-9acb6b8309c6" (UID: "26bc380f-7be2-473d-98b2-9acb6b8309c6"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.137110 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-df2ll"] Nov 26 15:29:05 crc kubenswrapper[5010]: W1126 15:29:05.147398 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfd9f5a65_e633_439f_8e8d_b760d20a3223.slice/crio-84043d1f339d8ac9b3cc79fcd2639ff2a9d4f9776f32d4edb40e10fb22cf13a0 WatchSource:0}: Error finding container 84043d1f339d8ac9b3cc79fcd2639ff2a9d4f9776f32d4edb40e10fb22cf13a0: Status 404 returned error can't find the container with id 84043d1f339d8ac9b3cc79fcd2639ff2a9d4f9776f32d4edb40e10fb22cf13a0 Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.149422 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26bc380f-7be2-473d-98b2-9acb6b8309c6-kube-api-access\") pod \"26bc380f-7be2-473d-98b2-9acb6b8309c6\" (UID: \"26bc380f-7be2-473d-98b2-9acb6b8309c6\") " Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.149872 5010 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/26bc380f-7be2-473d-98b2-9acb6b8309c6-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.154429 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26bc380f-7be2-473d-98b2-9acb6b8309c6-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "26bc380f-7be2-473d-98b2-9acb6b8309c6" (UID: "26bc380f-7be2-473d-98b2-9acb6b8309c6"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.239154 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-df2ll" event={"ID":"fd9f5a65-e633-439f-8e8d-b760d20a3223","Type":"ContainerStarted","Data":"84043d1f339d8ac9b3cc79fcd2639ff2a9d4f9776f32d4edb40e10fb22cf13a0"} Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.241705 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"26bc380f-7be2-473d-98b2-9acb6b8309c6","Type":"ContainerDied","Data":"5b8fc202ca2602d486de66a4e2d3a17dcc48b7f98494c66628adc44474285f77"} Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.241794 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b8fc202ca2602d486de66a4e2d3a17dcc48b7f98494c66628adc44474285f77" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.241874 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.251554 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/26bc380f-7be2-473d-98b2-9acb6b8309c6-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.371164 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:05 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:05 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:05 crc kubenswrapper[5010]: healthz check failed Nov 26 15:29:05 crc kubenswrapper[5010]: I1126 15:29:05.371238 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:06 crc kubenswrapper[5010]: I1126 15:29:06.266858 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-df2ll" event={"ID":"fd9f5a65-e633-439f-8e8d-b760d20a3223","Type":"ContainerStarted","Data":"da3918a04fc303e3cdbc264cfdf0d3d774ccb26c7446d09c5b3a81c422c16bef"} Nov 26 15:29:06 crc kubenswrapper[5010]: I1126 15:29:06.371777 5010 patch_prober.go:28] interesting pod/router-default-5444994796-ptfqn container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 15:29:06 crc kubenswrapper[5010]: [-]has-synced failed: reason withheld Nov 26 15:29:06 crc kubenswrapper[5010]: [+]process-running ok Nov 26 15:29:06 crc kubenswrapper[5010]: healthz check failed Nov 26 15:29:06 crc kubenswrapper[5010]: I1126 15:29:06.371870 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ptfqn" podUID="90ac104e-9059-4bf4-8d44-0ce8ffb5c08a" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 15:29:07 crc kubenswrapper[5010]: I1126 15:29:07.369640 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:29:07 crc kubenswrapper[5010]: I1126 15:29:07.373083 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-ptfqn" Nov 26 15:29:10 crc kubenswrapper[5010]: I1126 15:29:10.502418 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:29:11 crc kubenswrapper[5010]: I1126 15:29:11.422756 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:29:11 crc kubenswrapper[5010]: I1126 15:29:11.423078 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:29:12 crc kubenswrapper[5010]: I1126 15:29:12.382877 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:29:12 crc kubenswrapper[5010]: I1126 15:29:12.386744 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:29:12 crc kubenswrapper[5010]: I1126 15:29:12.735939 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-p6kxm" Nov 26 15:29:22 crc kubenswrapper[5010]: E1126 15:29:22.526109 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 15:29:22 crc kubenswrapper[5010]: E1126 15:29:22.527310 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wlf58,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-kfxxn_openshift-marketplace(a358d6b9-52e1-4088-9141-44059aa6e3af): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:22 crc kubenswrapper[5010]: E1126 15:29:22.528596 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-kfxxn" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" Nov 26 15:29:22 crc kubenswrapper[5010]: I1126 15:29:22.880286 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-qh68l" Nov 26 15:29:23 crc 
kubenswrapper[5010]: E1126 15:29:23.425104 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-kfxxn" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" Nov 26 15:29:28 crc kubenswrapper[5010]: E1126 15:29:28.678428 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 15:29:28 crc kubenswrapper[5010]: E1126 15:29:28.679273 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gmgfh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-hb82b_openshift-marketplace(b0730a77-df20-4d33-abd6-22de117337c3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:28 crc kubenswrapper[5010]: E1126 15:29:28.680687 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-hb82b" podUID="b0730a77-df20-4d33-abd6-22de117337c3" Nov 26 15:29:29 crc kubenswrapper[5010]: I1126 15:29:29.073841 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.349342 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-hb82b" 
podUID="b0730a77-df20-4d33-abd6-22de117337c3" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.516600 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.516974 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4npnx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-7jk7d_openshift-marketplace(5ca95312-780d-4552-9833-1ef36dd5d15d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.518651 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-7jk7d" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.554001 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.554206 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jjnrs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bdg87_openshift-marketplace(3ab9d996-a6a7-4204-abc2-0ec28f6bc569): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.555411 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-bdg87" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.559940 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.561097 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9prrp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-k8fjs_openshift-marketplace(e9847f64-a32c-494f-8a71-283b25184c19): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.562819 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-k8fjs" podUID="e9847f64-a32c-494f-8a71-283b25184c19" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.578647 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.579099 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-czgjm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-vk2zl_openshift-marketplace(79dbc879-38d9-4605-b382-01eec0def0ee): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.580435 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-vk2zl" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.614837 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.615034 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-77n5g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-t88lc_openshift-marketplace(91353bbb-798f-47cc-96b2-0dfeee2938f0): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:29:30 crc kubenswrapper[5010]: E1126 15:29:30.616264 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-t88lc" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" Nov 26 15:29:31 crc kubenswrapper[5010]: I1126 15:29:31.445656 5010 generic.go:334] "Generic (PLEG): container finished" podID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerID="3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067" exitCode=0 Nov 26 15:29:31 crc kubenswrapper[5010]: I1126 15:29:31.445784 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qct4n" event={"ID":"3db4017e-aa19-4e80-a2bc-d1a682fe2e98","Type":"ContainerDied","Data":"3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067"} Nov 26 15:29:31 crc kubenswrapper[5010]: I1126 15:29:31.457343 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-df2ll" event={"ID":"fd9f5a65-e633-439f-8e8d-b760d20a3223","Type":"ContainerStarted","Data":"177b4c89b1ac89d68c86cd6ab96bfea2dd663373be8101667af9a71545ec259b"} Nov 26 15:29:31 crc kubenswrapper[5010]: E1126 15:29:31.462392 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-vk2zl" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" Nov 26 15:29:31 crc kubenswrapper[5010]: E1126 15:29:31.464888 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" 
pod="openshift-marketplace/certified-operators-7jk7d" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" Nov 26 15:29:31 crc kubenswrapper[5010]: E1126 15:29:31.465025 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-bdg87" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" Nov 26 15:29:31 crc kubenswrapper[5010]: E1126 15:29:31.470535 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-k8fjs" podUID="e9847f64-a32c-494f-8a71-283b25184c19" Nov 26 15:29:31 crc kubenswrapper[5010]: E1126 15:29:31.477481 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-t88lc" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" Nov 26 15:29:31 crc kubenswrapper[5010]: I1126 15:29:31.528669 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-df2ll" podStartSLOduration=171.528646625 podStartE2EDuration="2m51.528646625s" podCreationTimestamp="2025-11-26 15:26:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:29:31.522667766 +0000 UTC m=+192.313384924" watchObservedRunningTime="2025-11-26 15:29:31.528646625 +0000 UTC m=+192.319363773" Nov 26 15:29:32 crc kubenswrapper[5010]: I1126 15:29:32.473495 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qct4n" event={"ID":"3db4017e-aa19-4e80-a2bc-d1a682fe2e98","Type":"ContainerStarted","Data":"141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b"} Nov 26 15:29:32 crc kubenswrapper[5010]: I1126 15:29:32.511029 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qct4n" podStartSLOduration=2.595773507 podStartE2EDuration="41.511002209s" podCreationTimestamp="2025-11-26 15:28:51 +0000 UTC" firstStartedPulling="2025-11-26 15:28:52.998521048 +0000 UTC m=+153.789238196" lastFinishedPulling="2025-11-26 15:29:31.91374971 +0000 UTC m=+192.704466898" observedRunningTime="2025-11-26 15:29:32.508782176 +0000 UTC m=+193.299499404" watchObservedRunningTime="2025-11-26 15:29:32.511002209 +0000 UTC m=+193.301719387" Nov 26 15:29:35 crc kubenswrapper[5010]: I1126 15:29:35.496087 5010 generic.go:334] "Generic (PLEG): container finished" podID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerID="3d50a99f82dec924dfa5ef66844bcde9d8aaaa7a4ad3015a32ec4cac8c0c220e" exitCode=0 Nov 26 15:29:35 crc kubenswrapper[5010]: I1126 15:29:35.496171 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfxxn" event={"ID":"a358d6b9-52e1-4088-9141-44059aa6e3af","Type":"ContainerDied","Data":"3d50a99f82dec924dfa5ef66844bcde9d8aaaa7a4ad3015a32ec4cac8c0c220e"} Nov 26 15:29:36 crc kubenswrapper[5010]: I1126 15:29:36.505551 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfxxn" 
event={"ID":"a358d6b9-52e1-4088-9141-44059aa6e3af","Type":"ContainerStarted","Data":"752a54f60ee6c1c44082e68ad2cd36b78ba2792eecb0950aaf8eabd16dab631d"} Nov 26 15:29:36 crc kubenswrapper[5010]: I1126 15:29:36.532073 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kfxxn" podStartSLOduration=3.492602053 podStartE2EDuration="47.532056008s" podCreationTimestamp="2025-11-26 15:28:49 +0000 UTC" firstStartedPulling="2025-11-26 15:28:51.957936819 +0000 UTC m=+152.748653967" lastFinishedPulling="2025-11-26 15:29:35.997390764 +0000 UTC m=+196.788107922" observedRunningTime="2025-11-26 15:29:36.528594135 +0000 UTC m=+197.319311283" watchObservedRunningTime="2025-11-26 15:29:36.532056008 +0000 UTC m=+197.322773156" Nov 26 15:29:40 crc kubenswrapper[5010]: I1126 15:29:40.324427 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:29:40 crc kubenswrapper[5010]: I1126 15:29:40.324956 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:29:40 crc kubenswrapper[5010]: I1126 15:29:40.543852 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.423279 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.423405 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.423556 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.425039 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.425330 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16" gracePeriod=600 Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.537217 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 15:29:41.537255 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:29:41 crc kubenswrapper[5010]: I1126 
15:29:41.588368 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:29:42 crc kubenswrapper[5010]: I1126 15:29:42.546541 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16" exitCode=0 Nov 26 15:29:42 crc kubenswrapper[5010]: I1126 15:29:42.546677 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16"} Nov 26 15:29:42 crc kubenswrapper[5010]: I1126 15:29:42.546764 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"1650b5fcdb5fec219e6ddc5a70f9c5a7048a441e9afd0f1a5126d9bea6739360"} Nov 26 15:29:42 crc kubenswrapper[5010]: I1126 15:29:42.657179 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:29:43 crc kubenswrapper[5010]: I1126 15:29:43.553814 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerStarted","Data":"fecd605a773056b5753a0e42ce1da4c304a070543a4b4198b3677a9abf0447a7"} Nov 26 15:29:43 crc kubenswrapper[5010]: I1126 15:29:43.555473 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerStarted","Data":"ba5330701cf63a655b653ed274b8de884c8fd084e409194e072d0095e5e9603c"} Nov 26 15:29:44 crc kubenswrapper[5010]: I1126 15:29:44.019369 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qct4n"] Nov 26 15:29:44 crc kubenswrapper[5010]: I1126 15:29:44.562137 5010 generic.go:334] "Generic (PLEG): container finished" podID="b0730a77-df20-4d33-abd6-22de117337c3" containerID="fecd605a773056b5753a0e42ce1da4c304a070543a4b4198b3677a9abf0447a7" exitCode=0 Nov 26 15:29:44 crc kubenswrapper[5010]: I1126 15:29:44.562226 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerDied","Data":"fecd605a773056b5753a0e42ce1da4c304a070543a4b4198b3677a9abf0447a7"} Nov 26 15:29:44 crc kubenswrapper[5010]: I1126 15:29:44.564579 5010 generic.go:334] "Generic (PLEG): container finished" podID="79dbc879-38d9-4605-b382-01eec0def0ee" containerID="ba5330701cf63a655b653ed274b8de884c8fd084e409194e072d0095e5e9603c" exitCode=0 Nov 26 15:29:44 crc kubenswrapper[5010]: I1126 15:29:44.564652 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerDied","Data":"ba5330701cf63a655b653ed274b8de884c8fd084e409194e072d0095e5e9603c"} Nov 26 15:29:44 crc kubenswrapper[5010]: I1126 15:29:44.564834 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qct4n" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="registry-server" 
containerID="cri-o://141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b" gracePeriod=2 Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.051687 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.152788 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fp68r\" (UniqueName: \"kubernetes.io/projected/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-kube-api-access-fp68r\") pod \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.152859 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-catalog-content\") pod \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.153080 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-utilities\") pod \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\" (UID: \"3db4017e-aa19-4e80-a2bc-d1a682fe2e98\") " Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.153883 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-utilities" (OuterVolumeSpecName: "utilities") pod "3db4017e-aa19-4e80-a2bc-d1a682fe2e98" (UID: "3db4017e-aa19-4e80-a2bc-d1a682fe2e98"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.159327 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-kube-api-access-fp68r" (OuterVolumeSpecName: "kube-api-access-fp68r") pod "3db4017e-aa19-4e80-a2bc-d1a682fe2e98" (UID: "3db4017e-aa19-4e80-a2bc-d1a682fe2e98"). InnerVolumeSpecName "kube-api-access-fp68r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.254386 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fp68r\" (UniqueName: \"kubernetes.io/projected/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-kube-api-access-fp68r\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.254419 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.263071 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3db4017e-aa19-4e80-a2bc-d1a682fe2e98" (UID: "3db4017e-aa19-4e80-a2bc-d1a682fe2e98"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.355820 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db4017e-aa19-4e80-a2bc-d1a682fe2e98-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.593678 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerStarted","Data":"a1240088e87018cb0a6728accdd821b9be4d1a6d73fef69415aa0ff59b2b95d4"} Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.599197 5010 generic.go:334] "Generic (PLEG): container finished" podID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerID="8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a" exitCode=0 Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.599309 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdg87" event={"ID":"3ab9d996-a6a7-4204-abc2-0ec28f6bc569","Type":"ContainerDied","Data":"8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a"} Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.606177 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerStarted","Data":"a6609f100c345eb7e2e7b237cc57571bbd40caa0b5e3a6c9308cb997093cdc52"} Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.611507 5010 generic.go:334] "Generic (PLEG): container finished" podID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerID="e2706810f9dbd4df6f831e819f4e786e9cb2a95362ce590ed1ddb0330398c641" exitCode=0 Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.611579 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7jk7d" event={"ID":"5ca95312-780d-4552-9833-1ef36dd5d15d","Type":"ContainerDied","Data":"e2706810f9dbd4df6f831e819f4e786e9cb2a95362ce590ed1ddb0330398c641"} Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.624978 5010 generic.go:334] "Generic (PLEG): container finished" podID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerID="141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b" exitCode=0 Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.625032 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qct4n" event={"ID":"3db4017e-aa19-4e80-a2bc-d1a682fe2e98","Type":"ContainerDied","Data":"141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b"} Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.625066 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qct4n" event={"ID":"3db4017e-aa19-4e80-a2bc-d1a682fe2e98","Type":"ContainerDied","Data":"7d45406a8394ab1dbc9504e5cd57150623062fc2c7f575210b8ec185449c3673"} Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.625086 5010 scope.go:117] "RemoveContainer" containerID="141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.625309 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qct4n" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.642334 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vk2zl" podStartSLOduration=3.432366023 podStartE2EDuration="57.642312574s" podCreationTimestamp="2025-11-26 15:28:48 +0000 UTC" firstStartedPulling="2025-11-26 15:28:50.895266155 +0000 UTC m=+151.685983303" lastFinishedPulling="2025-11-26 15:29:45.105212706 +0000 UTC m=+205.895929854" observedRunningTime="2025-11-26 15:29:45.621629644 +0000 UTC m=+206.412346792" watchObservedRunningTime="2025-11-26 15:29:45.642312574 +0000 UTC m=+206.433029722" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.648044 5010 scope.go:117] "RemoveContainer" containerID="3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.678189 5010 scope.go:117] "RemoveContainer" containerID="9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.679781 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hb82b" podStartSLOduration=4.27938484 podStartE2EDuration="58.67976667s" podCreationTimestamp="2025-11-26 15:28:47 +0000 UTC" firstStartedPulling="2025-11-26 15:28:50.835513016 +0000 UTC m=+151.626230154" lastFinishedPulling="2025-11-26 15:29:45.235894836 +0000 UTC m=+206.026611984" observedRunningTime="2025-11-26 15:29:45.662906852 +0000 UTC m=+206.453624000" watchObservedRunningTime="2025-11-26 15:29:45.67976667 +0000 UTC m=+206.470483818" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.701922 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qct4n"] Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.704300 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qct4n"] Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.711905 5010 scope.go:117] "RemoveContainer" containerID="141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b" Nov 26 15:29:45 crc kubenswrapper[5010]: E1126 15:29:45.712595 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b\": container with ID starting with 141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b not found: ID does not exist" containerID="141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.712649 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b"} err="failed to get container status \"141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b\": rpc error: code = NotFound desc = could not find container \"141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b\": container with ID starting with 141b5008c1726b9a9edbdeaa9ec7dbf8466733ae9ca2a46dda5f077e4d1fbb5b not found: ID does not exist" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.712682 5010 scope.go:117] "RemoveContainer" containerID="3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067" Nov 26 15:29:45 crc kubenswrapper[5010]: E1126 15:29:45.713095 5010 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067\": container with ID starting with 3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067 not found: ID does not exist" containerID="3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.713115 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067"} err="failed to get container status \"3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067\": rpc error: code = NotFound desc = could not find container \"3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067\": container with ID starting with 3bd0aac506ec09d7678bf54fd142bafaac72c53dbbbea6c1967242c8b3708067 not found: ID does not exist" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.713130 5010 scope.go:117] "RemoveContainer" containerID="9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e" Nov 26 15:29:45 crc kubenswrapper[5010]: E1126 15:29:45.716103 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e\": container with ID starting with 9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e not found: ID does not exist" containerID="9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.716124 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e"} err="failed to get container status \"9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e\": rpc error: code = NotFound desc = could not find container \"9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e\": container with ID starting with 9b0a435dd7072b028a7b9714c6cb5ed85d6b3dce641a071ebdca83bed14cca3e not found: ID does not exist" Nov 26 15:29:45 crc kubenswrapper[5010]: I1126 15:29:45.898669 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" path="/var/lib/kubelet/pods/3db4017e-aa19-4e80-a2bc-d1a682fe2e98/volumes" Nov 26 15:29:46 crc kubenswrapper[5010]: I1126 15:29:46.646883 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdg87" event={"ID":"3ab9d996-a6a7-4204-abc2-0ec28f6bc569","Type":"ContainerStarted","Data":"226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109"} Nov 26 15:29:46 crc kubenswrapper[5010]: I1126 15:29:46.651681 5010 generic.go:334] "Generic (PLEG): container finished" podID="e9847f64-a32c-494f-8a71-283b25184c19" containerID="789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d" exitCode=0 Nov 26 15:29:46 crc kubenswrapper[5010]: I1126 15:29:46.651755 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8fjs" event={"ID":"e9847f64-a32c-494f-8a71-283b25184c19","Type":"ContainerDied","Data":"789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d"} Nov 26 15:29:46 crc kubenswrapper[5010]: I1126 15:29:46.667411 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bdg87" 
podStartSLOduration=2.360238187 podStartE2EDuration="56.667387311s" podCreationTimestamp="2025-11-26 15:28:50 +0000 UTC" firstStartedPulling="2025-11-26 15:28:51.952930098 +0000 UTC m=+152.743647236" lastFinishedPulling="2025-11-26 15:29:46.260079212 +0000 UTC m=+207.050796360" observedRunningTime="2025-11-26 15:29:46.665255949 +0000 UTC m=+207.455973087" watchObservedRunningTime="2025-11-26 15:29:46.667387311 +0000 UTC m=+207.458104459" Nov 26 15:29:47 crc kubenswrapper[5010]: I1126 15:29:47.661439 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8fjs" event={"ID":"e9847f64-a32c-494f-8a71-283b25184c19","Type":"ContainerStarted","Data":"4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06"} Nov 26 15:29:47 crc kubenswrapper[5010]: I1126 15:29:47.663662 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7jk7d" event={"ID":"5ca95312-780d-4552-9833-1ef36dd5d15d","Type":"ContainerStarted","Data":"e40597ca8638b3e6499bff41f02c59fed163848fa4567f71a3a00c3797bb197f"} Nov 26 15:29:47 crc kubenswrapper[5010]: I1126 15:29:47.693652 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k8fjs" podStartSLOduration=4.490493702 podStartE2EDuration="1m0.693631267s" podCreationTimestamp="2025-11-26 15:28:47 +0000 UTC" firstStartedPulling="2025-11-26 15:28:50.866640006 +0000 UTC m=+151.657357154" lastFinishedPulling="2025-11-26 15:29:47.069777581 +0000 UTC m=+207.860494719" observedRunningTime="2025-11-26 15:29:47.691505075 +0000 UTC m=+208.482222223" watchObservedRunningTime="2025-11-26 15:29:47.693631267 +0000 UTC m=+208.484348415" Nov 26 15:29:47 crc kubenswrapper[5010]: I1126 15:29:47.716556 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7jk7d" podStartSLOduration=3.739061047 podStartE2EDuration="1m0.71654s" podCreationTimestamp="2025-11-26 15:28:47 +0000 UTC" firstStartedPulling="2025-11-26 15:28:49.550199959 +0000 UTC m=+150.340917107" lastFinishedPulling="2025-11-26 15:29:46.527678912 +0000 UTC m=+207.318396060" observedRunningTime="2025-11-26 15:29:47.714221494 +0000 UTC m=+208.504938642" watchObservedRunningTime="2025-11-26 15:29:47.71654 +0000 UTC m=+208.507257148" Nov 26 15:29:47 crc kubenswrapper[5010]: I1126 15:29:47.934767 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:29:47 crc kubenswrapper[5010]: I1126 15:29:47.934834 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.231953 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.233095 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.274800 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.398857 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.398909 
5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.533621 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.533665 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.571657 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.670862 5010 generic.go:334] "Generic (PLEG): container finished" podID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerID="06e856e6684e6f3d59516b6b88d38dcf939a0793cea8d83874327cd7cd151786" exitCode=0 Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.670994 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t88lc" event={"ID":"91353bbb-798f-47cc-96b2-0dfeee2938f0","Type":"ContainerDied","Data":"06e856e6684e6f3d59516b6b88d38dcf939a0793cea8d83874327cd7cd151786"} Nov 26 15:29:48 crc kubenswrapper[5010]: I1126 15:29:48.972760 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-7jk7d" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="registry-server" probeResult="failure" output=< Nov 26 15:29:48 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 15:29:48 crc kubenswrapper[5010]: > Nov 26 15:29:49 crc kubenswrapper[5010]: I1126 15:29:49.441408 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-k8fjs" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="registry-server" probeResult="failure" output=< Nov 26 15:29:49 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 15:29:49 crc kubenswrapper[5010]: > Nov 26 15:29:49 crc kubenswrapper[5010]: I1126 15:29:49.680003 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t88lc" event={"ID":"91353bbb-798f-47cc-96b2-0dfeee2938f0","Type":"ContainerStarted","Data":"7caec6b8db7951c90944aeffcf1711aa86957850d3f3e98447601b080e706d3e"} Nov 26 15:29:49 crc kubenswrapper[5010]: I1126 15:29:49.700767 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-t88lc" podStartSLOduration=3.512604578 podStartE2EDuration="59.700745611s" podCreationTimestamp="2025-11-26 15:28:50 +0000 UTC" firstStartedPulling="2025-11-26 15:28:53.020082898 +0000 UTC m=+153.810800046" lastFinishedPulling="2025-11-26 15:29:49.208223931 +0000 UTC m=+209.998941079" observedRunningTime="2025-11-26 15:29:49.698922877 +0000 UTC m=+210.489640035" watchObservedRunningTime="2025-11-26 15:29:49.700745611 +0000 UTC m=+210.491462769" Nov 26 15:29:50 crc kubenswrapper[5010]: I1126 15:29:50.378252 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:29:50 crc kubenswrapper[5010]: I1126 15:29:50.429394 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:29:50 crc kubenswrapper[5010]: I1126 15:29:50.429473 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:29:50 crc kubenswrapper[5010]: I1126 15:29:50.471555 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:29:51 crc kubenswrapper[5010]: I1126 15:29:51.416342 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:29:51 crc kubenswrapper[5010]: I1126 15:29:51.416913 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:29:52 crc kubenswrapper[5010]: I1126 15:29:52.465361 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-t88lc" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="registry-server" probeResult="failure" output=< Nov 26 15:29:52 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 15:29:52 crc kubenswrapper[5010]: > Nov 26 15:29:57 crc kubenswrapper[5010]: I1126 15:29:57.989841 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:29:58 crc kubenswrapper[5010]: I1126 15:29:58.052106 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:29:58 crc kubenswrapper[5010]: I1126 15:29:58.301931 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:29:58 crc kubenswrapper[5010]: I1126 15:29:58.443374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:29:58 crc kubenswrapper[5010]: I1126 15:29:58.495453 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:29:58 crc kubenswrapper[5010]: I1126 15:29:58.593875 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:29:59 crc kubenswrapper[5010]: I1126 15:29:59.148308 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k8fjs"] Nov 26 15:29:59 crc kubenswrapper[5010]: I1126 15:29:59.741125 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k8fjs" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="registry-server" containerID="cri-o://4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06" gracePeriod=2 Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.154023 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t"] Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.154842 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="extract-content" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.154861 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="extract-content" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.154904 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="155bc56e-ae99-43b4-86cf-83800e3cff58" containerName="pruner" Nov 26 15:30:00 crc 
kubenswrapper[5010]: I1126 15:30:00.154915 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="155bc56e-ae99-43b4-86cf-83800e3cff58" containerName="pruner" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.154928 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26bc380f-7be2-473d-98b2-9acb6b8309c6" containerName="pruner" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.154936 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="26bc380f-7be2-473d-98b2-9acb6b8309c6" containerName="pruner" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.154948 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="registry-server" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.154956 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="registry-server" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.155067 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="extract-utilities" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.155077 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="extract-utilities" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.155273 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3db4017e-aa19-4e80-a2bc-d1a682fe2e98" containerName="registry-server" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.155313 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="155bc56e-ae99-43b4-86cf-83800e3cff58" containerName="pruner" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.155329 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="26bc380f-7be2-473d-98b2-9acb6b8309c6" containerName="pruner" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.156022 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.159818 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.160471 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.161194 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t"] Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.177855 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7khw\" (UniqueName: \"kubernetes.io/projected/f9803034-f657-474c-aad0-4d2cfc54ed20-kube-api-access-v7khw\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.177913 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f9803034-f657-474c-aad0-4d2cfc54ed20-secret-volume\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.177960 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f9803034-f657-474c-aad0-4d2cfc54ed20-config-volume\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.285856 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7khw\" (UniqueName: \"kubernetes.io/projected/f9803034-f657-474c-aad0-4d2cfc54ed20-kube-api-access-v7khw\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.285912 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f9803034-f657-474c-aad0-4d2cfc54ed20-secret-volume\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.286023 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f9803034-f657-474c-aad0-4d2cfc54ed20-config-volume\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.287022 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f9803034-f657-474c-aad0-4d2cfc54ed20-config-volume\") pod 
\"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.301593 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7khw\" (UniqueName: \"kubernetes.io/projected/f9803034-f657-474c-aad0-4d2cfc54ed20-kube-api-access-v7khw\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.301592 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f9803034-f657-474c-aad0-4d2cfc54ed20-secret-volume\") pod \"collect-profiles-29402850-qqc6t\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.416895 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.469831 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.479163 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.487861 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-catalog-content\") pod \"e9847f64-a32c-494f-8a71-283b25184c19\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.487949 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-utilities\") pod \"e9847f64-a32c-494f-8a71-283b25184c19\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.487997 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9prrp\" (UniqueName: \"kubernetes.io/projected/e9847f64-a32c-494f-8a71-283b25184c19-kube-api-access-9prrp\") pod \"e9847f64-a32c-494f-8a71-283b25184c19\" (UID: \"e9847f64-a32c-494f-8a71-283b25184c19\") " Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.490008 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-utilities" (OuterVolumeSpecName: "utilities") pod "e9847f64-a32c-494f-8a71-283b25184c19" (UID: "e9847f64-a32c-494f-8a71-283b25184c19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.492283 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9847f64-a32c-494f-8a71-283b25184c19-kube-api-access-9prrp" (OuterVolumeSpecName: "kube-api-access-9prrp") pod "e9847f64-a32c-494f-8a71-283b25184c19" (UID: "e9847f64-a32c-494f-8a71-283b25184c19"). InnerVolumeSpecName "kube-api-access-9prrp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.547547 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vk2zl"] Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.547893 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vk2zl" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="registry-server" containerID="cri-o://a1240088e87018cb0a6728accdd821b9be4d1a6d73fef69415aa0ff59b2b95d4" gracePeriod=2 Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.551687 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e9847f64-a32c-494f-8a71-283b25184c19" (UID: "e9847f64-a32c-494f-8a71-283b25184c19"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.590050 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.590102 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e9847f64-a32c-494f-8a71-283b25184c19-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.590121 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9prrp\" (UniqueName: \"kubernetes.io/projected/e9847f64-a32c-494f-8a71-283b25184c19-kube-api-access-9prrp\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.678295 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t"] Nov 26 15:30:00 crc kubenswrapper[5010]: W1126 15:30:00.695400 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9803034_f657_474c_aad0_4d2cfc54ed20.slice/crio-7dd92a9011ab1f78a2efff44b14a9f357e26faf1e110cc16ce37588b0d25687f WatchSource:0}: Error finding container 7dd92a9011ab1f78a2efff44b14a9f357e26faf1e110cc16ce37588b0d25687f: Status 404 returned error can't find the container with id 7dd92a9011ab1f78a2efff44b14a9f357e26faf1e110cc16ce37588b0d25687f Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.748933 5010 generic.go:334] "Generic (PLEG): container finished" podID="e9847f64-a32c-494f-8a71-283b25184c19" containerID="4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06" exitCode=0 Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.749006 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8fjs" event={"ID":"e9847f64-a32c-494f-8a71-283b25184c19","Type":"ContainerDied","Data":"4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06"} Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.749040 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8fjs" event={"ID":"e9847f64-a32c-494f-8a71-283b25184c19","Type":"ContainerDied","Data":"e8644509a19cf01418717f0e0cfdd7ca45a0a8956f810a5394aeb2095cc70f0b"} Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.749033 5010 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k8fjs" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.749060 5010 scope.go:117] "RemoveContainer" containerID="4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.776757 5010 generic.go:334] "Generic (PLEG): container finished" podID="79dbc879-38d9-4605-b382-01eec0def0ee" containerID="a1240088e87018cb0a6728accdd821b9be4d1a6d73fef69415aa0ff59b2b95d4" exitCode=0 Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.776848 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerDied","Data":"a1240088e87018cb0a6728accdd821b9be4d1a6d73fef69415aa0ff59b2b95d4"} Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.777960 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" event={"ID":"f9803034-f657-474c-aad0-4d2cfc54ed20","Type":"ContainerStarted","Data":"7dd92a9011ab1f78a2efff44b14a9f357e26faf1e110cc16ce37588b0d25687f"} Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.794957 5010 scope.go:117] "RemoveContainer" containerID="789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.802415 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k8fjs"] Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.806530 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k8fjs"] Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.835535 5010 scope.go:117] "RemoveContainer" containerID="4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.857858 5010 scope.go:117] "RemoveContainer" containerID="4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.858310 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06\": container with ID starting with 4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06 not found: ID does not exist" containerID="4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.858382 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06"} err="failed to get container status \"4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06\": rpc error: code = NotFound desc = could not find container \"4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06\": container with ID starting with 4c1b91c21d8c57a729f4572b19a929c709c4e30a454a715daa27852299647f06 not found: ID does not exist" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.858416 5010 scope.go:117] "RemoveContainer" containerID="789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.858944 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d\": container with ID starting with 789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d not found: ID does not exist" containerID="789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.858986 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d"} err="failed to get container status \"789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d\": rpc error: code = NotFound desc = could not find container \"789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d\": container with ID starting with 789fea2c7483f8d6b33b248bbe2085ec509274be24c23fbbe797f3ad49804f9d not found: ID does not exist" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.859022 5010 scope.go:117] "RemoveContainer" containerID="4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480" Nov 26 15:30:00 crc kubenswrapper[5010]: E1126 15:30:00.859300 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480\": container with ID starting with 4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480 not found: ID does not exist" containerID="4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.859320 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480"} err="failed to get container status \"4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480\": rpc error: code = NotFound desc = could not find container \"4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480\": container with ID starting with 4c21c3a2f5d61441f17eeb72051cbb5777b5b896f368b16c311d77e4823b0480 not found: ID does not exist" Nov 26 15:30:00 crc kubenswrapper[5010]: I1126 15:30:00.907689 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:00.999644 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-utilities\") pod \"79dbc879-38d9-4605-b382-01eec0def0ee\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:00.999735 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-czgjm\" (UniqueName: \"kubernetes.io/projected/79dbc879-38d9-4605-b382-01eec0def0ee-kube-api-access-czgjm\") pod \"79dbc879-38d9-4605-b382-01eec0def0ee\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:00.999767 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-catalog-content\") pod \"79dbc879-38d9-4605-b382-01eec0def0ee\" (UID: \"79dbc879-38d9-4605-b382-01eec0def0ee\") " Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.000768 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-utilities" (OuterVolumeSpecName: "utilities") pod "79dbc879-38d9-4605-b382-01eec0def0ee" (UID: "79dbc879-38d9-4605-b382-01eec0def0ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.013808 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79dbc879-38d9-4605-b382-01eec0def0ee-kube-api-access-czgjm" (OuterVolumeSpecName: "kube-api-access-czgjm") pod "79dbc879-38d9-4605-b382-01eec0def0ee" (UID: "79dbc879-38d9-4605-b382-01eec0def0ee"). InnerVolumeSpecName "kube-api-access-czgjm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.075962 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79dbc879-38d9-4605-b382-01eec0def0ee" (UID: "79dbc879-38d9-4605-b382-01eec0def0ee"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.101268 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.101313 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-czgjm\" (UniqueName: \"kubernetes.io/projected/79dbc879-38d9-4605-b382-01eec0def0ee-kube-api-access-czgjm\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.101326 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79dbc879-38d9-4605-b382-01eec0def0ee-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.236883 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gw7ld"] Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.474433 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.520853 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.786789 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vk2zl" event={"ID":"79dbc879-38d9-4605-b382-01eec0def0ee","Type":"ContainerDied","Data":"7c8119c41edec4968aad13037f27553bf70b48fc7a78f69effe16e187032f5b6"} Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.786823 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vk2zl" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.786846 5010 scope.go:117] "RemoveContainer" containerID="a1240088e87018cb0a6728accdd821b9be4d1a6d73fef69415aa0ff59b2b95d4" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.788412 5010 generic.go:334] "Generic (PLEG): container finished" podID="f9803034-f657-474c-aad0-4d2cfc54ed20" containerID="cdafb804e1ec93e2e611d9ba99b33effcf9a33d8328ef2a09032dfaedb11c394" exitCode=0 Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.788523 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" event={"ID":"f9803034-f657-474c-aad0-4d2cfc54ed20","Type":"ContainerDied","Data":"cdafb804e1ec93e2e611d9ba99b33effcf9a33d8328ef2a09032dfaedb11c394"} Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.805847 5010 scope.go:117] "RemoveContainer" containerID="ba5330701cf63a655b653ed274b8de884c8fd084e409194e072d0095e5e9603c" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.811182 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vk2zl"] Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.814002 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vk2zl"] Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.843921 5010 scope.go:117] "RemoveContainer" containerID="c56b40795f323c0b41866e09fba43a86c5b48c74b226f75aa85fd6bb560ea51e" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.913043 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" path="/var/lib/kubelet/pods/79dbc879-38d9-4605-b382-01eec0def0ee/volumes" Nov 26 15:30:01 crc kubenswrapper[5010]: I1126 15:30:01.913865 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9847f64-a32c-494f-8a71-283b25184c19" path="/var/lib/kubelet/pods/e9847f64-a32c-494f-8a71-283b25184c19/volumes" Nov 26 15:30:02 crc kubenswrapper[5010]: I1126 15:30:02.949667 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdg87"] Nov 26 15:30:02 crc kubenswrapper[5010]: I1126 15:30:02.950409 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bdg87" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="registry-server" containerID="cri-o://226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109" gracePeriod=2 Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.157636 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.228043 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f9803034-f657-474c-aad0-4d2cfc54ed20-secret-volume\") pod \"f9803034-f657-474c-aad0-4d2cfc54ed20\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.228083 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f9803034-f657-474c-aad0-4d2cfc54ed20-config-volume\") pod \"f9803034-f657-474c-aad0-4d2cfc54ed20\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.228112 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7khw\" (UniqueName: \"kubernetes.io/projected/f9803034-f657-474c-aad0-4d2cfc54ed20-kube-api-access-v7khw\") pod \"f9803034-f657-474c-aad0-4d2cfc54ed20\" (UID: \"f9803034-f657-474c-aad0-4d2cfc54ed20\") " Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.234845 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9803034-f657-474c-aad0-4d2cfc54ed20-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f9803034-f657-474c-aad0-4d2cfc54ed20" (UID: "f9803034-f657-474c-aad0-4d2cfc54ed20"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.235313 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9803034-f657-474c-aad0-4d2cfc54ed20-config-volume" (OuterVolumeSpecName: "config-volume") pod "f9803034-f657-474c-aad0-4d2cfc54ed20" (UID: "f9803034-f657-474c-aad0-4d2cfc54ed20"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.240959 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9803034-f657-474c-aad0-4d2cfc54ed20-kube-api-access-v7khw" (OuterVolumeSpecName: "kube-api-access-v7khw") pod "f9803034-f657-474c-aad0-4d2cfc54ed20" (UID: "f9803034-f657-474c-aad0-4d2cfc54ed20"). InnerVolumeSpecName "kube-api-access-v7khw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.310193 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.329479 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-catalog-content\") pod \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.329557 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-utilities\") pod \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.329592 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjnrs\" (UniqueName: \"kubernetes.io/projected/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-kube-api-access-jjnrs\") pod \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\" (UID: \"3ab9d996-a6a7-4204-abc2-0ec28f6bc569\") " Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.329849 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f9803034-f657-474c-aad0-4d2cfc54ed20-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.329871 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f9803034-f657-474c-aad0-4d2cfc54ed20-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.329885 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7khw\" (UniqueName: \"kubernetes.io/projected/f9803034-f657-474c-aad0-4d2cfc54ed20-kube-api-access-v7khw\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.331359 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-utilities" (OuterVolumeSpecName: "utilities") pod "3ab9d996-a6a7-4204-abc2-0ec28f6bc569" (UID: "3ab9d996-a6a7-4204-abc2-0ec28f6bc569"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.333559 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-kube-api-access-jjnrs" (OuterVolumeSpecName: "kube-api-access-jjnrs") pod "3ab9d996-a6a7-4204-abc2-0ec28f6bc569" (UID: "3ab9d996-a6a7-4204-abc2-0ec28f6bc569"). InnerVolumeSpecName "kube-api-access-jjnrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.345396 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3ab9d996-a6a7-4204-abc2-0ec28f6bc569" (UID: "3ab9d996-a6a7-4204-abc2-0ec28f6bc569"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.431542 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.431585 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.431599 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjnrs\" (UniqueName: \"kubernetes.io/projected/3ab9d996-a6a7-4204-abc2-0ec28f6bc569-kube-api-access-jjnrs\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.806328 5010 generic.go:334] "Generic (PLEG): container finished" podID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerID="226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109" exitCode=0 Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.806418 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdg87" event={"ID":"3ab9d996-a6a7-4204-abc2-0ec28f6bc569","Type":"ContainerDied","Data":"226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109"} Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.806456 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bdg87" event={"ID":"3ab9d996-a6a7-4204-abc2-0ec28f6bc569","Type":"ContainerDied","Data":"77e9b137e9c2b630b261c54fe075682e7f69f07cffa35382e18aef721d86ff74"} Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.806469 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bdg87" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.806489 5010 scope.go:117] "RemoveContainer" containerID="226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.808384 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" event={"ID":"f9803034-f657-474c-aad0-4d2cfc54ed20","Type":"ContainerDied","Data":"7dd92a9011ab1f78a2efff44b14a9f357e26faf1e110cc16ce37588b0d25687f"} Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.808427 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dd92a9011ab1f78a2efff44b14a9f357e26faf1e110cc16ce37588b0d25687f" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.808496 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.830943 5010 scope.go:117] "RemoveContainer" containerID="8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.852807 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdg87"] Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.856951 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bdg87"] Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.875683 5010 scope.go:117] "RemoveContainer" containerID="77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.896280 5010 scope.go:117] "RemoveContainer" containerID="226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109" Nov 26 15:30:03 crc kubenswrapper[5010]: E1126 15:30:03.896728 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109\": container with ID starting with 226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109 not found: ID does not exist" containerID="226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.896763 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109"} err="failed to get container status \"226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109\": rpc error: code = NotFound desc = could not find container \"226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109\": container with ID starting with 226edadcffab44aed6422bf46fe629418ebd3469d6ae7cab69aeab0afef21109 not found: ID does not exist" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.896783 5010 scope.go:117] "RemoveContainer" containerID="8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a" Nov 26 15:30:03 crc kubenswrapper[5010]: E1126 15:30:03.897200 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a\": container with ID starting with 8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a not found: ID does not exist" containerID="8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.897252 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a"} err="failed to get container status \"8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a\": rpc error: code = NotFound desc = could not find container \"8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a\": container with ID starting with 8f94a1f6b977e81021fbc6bf3385e487d123cf833d24324310f3202c5c03db4a not found: ID does not exist" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.897284 5010 scope.go:117] "RemoveContainer" containerID="77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28" Nov 26 15:30:03 crc kubenswrapper[5010]: E1126 15:30:03.897676 5010 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28\": container with ID starting with 77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28 not found: ID does not exist" containerID="77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.897737 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28"} err="failed to get container status \"77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28\": rpc error: code = NotFound desc = could not find container \"77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28\": container with ID starting with 77130e8486f3bfbcb7b8fbd234037241662949a8b6ba40d615770ff253dc1d28 not found: ID does not exist" Nov 26 15:30:03 crc kubenswrapper[5010]: I1126 15:30:03.899845 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" path="/var/lib/kubelet/pods/3ab9d996-a6a7-4204-abc2-0ec28f6bc569/volumes" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.267801 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" podUID="3d1c114a-859f-4dd2-8bd5-79f55b713703" containerName="oauth-openshift" containerID="cri-o://cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b" gracePeriod=15 Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.755816 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812218 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-796b4dcc4-8t9zx"] Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812617 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="extract-utilities" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812653 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="extract-utilities" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812671 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="extract-utilities" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812685 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="extract-utilities" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812701 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d1c114a-859f-4dd2-8bd5-79f55b713703" containerName="oauth-openshift" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812741 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d1c114a-859f-4dd2-8bd5-79f55b713703" containerName="oauth-openshift" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812761 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812776 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9847f64-a32c-494f-8a71-283b25184c19" 
containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812794 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9803034-f657-474c-aad0-4d2cfc54ed20" containerName="collect-profiles" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812806 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9803034-f657-474c-aad0-4d2cfc54ed20" containerName="collect-profiles" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812830 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812843 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812867 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812879 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812895 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="extract-utilities" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812909 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="extract-utilities" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812923 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="extract-content" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812935 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="extract-content" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812956 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="extract-content" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812968 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="extract-content" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.812981 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="extract-content" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.812993 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="extract-content" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.813165 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9847f64-a32c-494f-8a71-283b25184c19" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.813189 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="79dbc879-38d9-4605-b382-01eec0def0ee" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.813206 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9803034-f657-474c-aad0-4d2cfc54ed20" containerName="collect-profiles" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.813220 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="3ab9d996-a6a7-4204-abc2-0ec28f6bc569" containerName="registry-server" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.813243 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d1c114a-859f-4dd2-8bd5-79f55b713703" containerName="oauth-openshift" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.813969 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.908622 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-796b4dcc4-8t9zx"] Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927488 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-dir\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927643 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5j97m\" (UniqueName: \"kubernetes.io/projected/3d1c114a-859f-4dd2-8bd5-79f55b713703-kube-api-access-5j97m\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927649 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927754 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-service-ca\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927813 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-router-certs\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927873 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-cliconfig\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.927925 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-error\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928007 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-ocp-branding-template\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928162 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-idp-0-file-data\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928233 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-login\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928277 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-policies\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928314 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-session\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928398 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-provider-selection\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928464 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-trusted-ca-bundle\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.928512 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-serving-cert\") pod \"3d1c114a-859f-4dd2-8bd5-79f55b713703\" (UID: \"3d1c114a-859f-4dd2-8bd5-79f55b713703\") " Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.929293 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.929436 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930411 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930661 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-session\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-audit-policies\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930813 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-cliconfig\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930895 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930941 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.930993 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.931051 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-serving-cert\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.931628 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-trxc7\" (UniqueName: \"kubernetes.io/projected/4c155699-ec94-4772-940f-c5e8a9cb3396-kube-api-access-trxc7\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.931890 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-error\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.931955 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932078 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932355 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-login\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932439 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4c155699-ec94-4772-940f-c5e8a9cb3396-audit-dir\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932487 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-router-certs\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932539 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-service-ca\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932612 5010 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932637 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932660 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932685 5010 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.932733 5010 reconciler_common.go:293] "Volume 
detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.936378 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d1c114a-859f-4dd2-8bd5-79f55b713703-kube-api-access-5j97m" (OuterVolumeSpecName: "kube-api-access-5j97m") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "kube-api-access-5j97m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.936484 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.941246 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.941489 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.942184 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.948090 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.948264 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). 
InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.949018 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.949236 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "3d1c114a-859f-4dd2-8bd5-79f55b713703" (UID: "3d1c114a-859f-4dd2-8bd5-79f55b713703"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.967644 5010 generic.go:334] "Generic (PLEG): container finished" podID="3d1c114a-859f-4dd2-8bd5-79f55b713703" containerID="cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b" exitCode=0 Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.967739 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" event={"ID":"3d1c114a-859f-4dd2-8bd5-79f55b713703","Type":"ContainerDied","Data":"cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b"} Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.967779 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" event={"ID":"3d1c114a-859f-4dd2-8bd5-79f55b713703","Type":"ContainerDied","Data":"44e3efd94150d355b7b6de3a3b7e03e0c4a069f3259de69675815a30520c6b0b"} Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.967797 5010 scope.go:117] "RemoveContainer" containerID="cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.967809 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-gw7ld" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.990619 5010 scope.go:117] "RemoveContainer" containerID="cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b" Nov 26 15:30:26 crc kubenswrapper[5010]: E1126 15:30:26.991073 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b\": container with ID starting with cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b not found: ID does not exist" containerID="cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.991138 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b"} err="failed to get container status \"cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b\": rpc error: code = NotFound desc = could not find container \"cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b\": container with ID starting with cd865f6661fc00585c97ba376c522f98e69a6c203790a97d0ec0fdf4c3c9e77b not found: ID does not exist" Nov 26 15:30:26 crc kubenswrapper[5010]: I1126 15:30:26.997291 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gw7ld"] Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.007217 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-gw7ld"] Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034087 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034254 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-serving-cert\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034316 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-trxc7\" (UniqueName: \"kubernetes.io/projected/4c155699-ec94-4772-940f-c5e8a9cb3396-kube-api-access-trxc7\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034365 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-error\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034413 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034480 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-login\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034548 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4c155699-ec94-4772-940f-c5e8a9cb3396-audit-dir\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034590 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-router-certs\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034646 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-service-ca\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034696 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-session\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034693 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4c155699-ec94-4772-940f-c5e8a9cb3396-audit-dir\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034780 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-audit-policies\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034830 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-cliconfig\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034885 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034925 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.034999 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035033 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035057 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035080 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5j97m\" (UniqueName: \"kubernetes.io/projected/3d1c114a-859f-4dd2-8bd5-79f55b713703-kube-api-access-5j97m\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035101 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035125 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035147 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035171 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-idp-0-file-data\") on node \"crc\" 
DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035194 5010 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3d1c114a-859f-4dd2-8bd5-79f55b713703-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035668 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035728 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-service-ca\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.035734 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-audit-policies\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.036149 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-cliconfig\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.039758 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-router-certs\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.040002 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.040485 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-serving-cert\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.040571 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.041266 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-session\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.042394 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-login\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.043249 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.043281 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4c155699-ec94-4772-940f-c5e8a9cb3396-v4-0-config-user-template-error\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.051947 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-trxc7\" (UniqueName: \"kubernetes.io/projected/4c155699-ec94-4772-940f-c5e8a9cb3396-kube-api-access-trxc7\") pod \"oauth-openshift-796b4dcc4-8t9zx\" (UID: \"4c155699-ec94-4772-940f-c5e8a9cb3396\") " pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.158797 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.425057 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-796b4dcc4-8t9zx"] Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.904460 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d1c114a-859f-4dd2-8bd5-79f55b713703" path="/var/lib/kubelet/pods/3d1c114a-859f-4dd2-8bd5-79f55b713703/volumes" Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.982045 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" event={"ID":"4c155699-ec94-4772-940f-c5e8a9cb3396","Type":"ContainerStarted","Data":"5df1c3f3d162858e2c3417a93305f9f4d92d7e605d1ca60080ac4fba72bd0b28"} Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.982098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" event={"ID":"4c155699-ec94-4772-940f-c5e8a9cb3396","Type":"ContainerStarted","Data":"35dba1b0cba304e49e7693b3bdb54b3cefbb36ae56637fd7e5fc0a372a161316"} Nov 26 15:30:27 crc kubenswrapper[5010]: I1126 15:30:27.982373 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:28 crc kubenswrapper[5010]: I1126 15:30:28.024362 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" podStartSLOduration=27.024339654 podStartE2EDuration="27.024339654s" podCreationTimestamp="2025-11-26 15:30:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:30:28.020120202 +0000 UTC m=+248.810837380" watchObservedRunningTime="2025-11-26 15:30:28.024339654 +0000 UTC m=+248.815056832" Nov 26 15:30:28 crc kubenswrapper[5010]: I1126 15:30:28.184691 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-796b4dcc4-8t9zx" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.468525 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7jk7d"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.469292 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7jk7d" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="registry-server" containerID="cri-o://e40597ca8638b3e6499bff41f02c59fed163848fa4567f71a3a00c3797bb197f" gracePeriod=30 Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.487644 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hb82b"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.487995 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hb82b" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="registry-server" containerID="cri-o://a6609f100c345eb7e2e7b237cc57571bbd40caa0b5e3a6c9308cb997093cdc52" gracePeriod=30 Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.491952 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mr9qp"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.492238 5010 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerName="marketplace-operator" containerID="cri-o://da7466b664f286bad12abd26f9b06c34b6f1c06bbd4bd53154fca4cec40748e9" gracePeriod=30 Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.501367 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfxxn"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.502029 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kfxxn" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="registry-server" containerID="cri-o://752a54f60ee6c1c44082e68ad2cd36b78ba2792eecb0950aaf8eabd16dab631d" gracePeriod=30 Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.517212 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-z7kzh"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.518894 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.523914 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t88lc"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.524227 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-t88lc" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="registry-server" containerID="cri-o://7caec6b8db7951c90944aeffcf1711aa86957850d3f3e98447601b080e706d3e" gracePeriod=30 Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.528107 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-z7kzh"] Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.552917 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jg28m\" (UniqueName: \"kubernetes.io/projected/7ade2b88-da36-4267-a6b2-f6917eaaca43-kube-api-access-jg28m\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.553028 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7ade2b88-da36-4267-a6b2-f6917eaaca43-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.553156 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7ade2b88-da36-4267-a6b2-f6917eaaca43-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.653928 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jg28m\" (UniqueName: 
\"kubernetes.io/projected/7ade2b88-da36-4267-a6b2-f6917eaaca43-kube-api-access-jg28m\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.653981 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7ade2b88-da36-4267-a6b2-f6917eaaca43-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.654009 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7ade2b88-da36-4267-a6b2-f6917eaaca43-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.657072 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7ade2b88-da36-4267-a6b2-f6917eaaca43-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.661166 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7ade2b88-da36-4267-a6b2-f6917eaaca43-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.671925 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jg28m\" (UniqueName: \"kubernetes.io/projected/7ade2b88-da36-4267-a6b2-f6917eaaca43-kube-api-access-jg28m\") pod \"marketplace-operator-79b997595-z7kzh\" (UID: \"7ade2b88-da36-4267-a6b2-f6917eaaca43\") " pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:51 crc kubenswrapper[5010]: I1126 15:30:51.848845 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.140608 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-z7kzh"] Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.159905 5010 generic.go:334] "Generic (PLEG): container finished" podID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerID="da7466b664f286bad12abd26f9b06c34b6f1c06bbd4bd53154fca4cec40748e9" exitCode=0 Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.159999 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" event={"ID":"7e26d790-6dd2-4e6e-8e21-8b791f39744e","Type":"ContainerDied","Data":"da7466b664f286bad12abd26f9b06c34b6f1c06bbd4bd53154fca4cec40748e9"} Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.174881 5010 generic.go:334] "Generic (PLEG): container finished" podID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerID="7caec6b8db7951c90944aeffcf1711aa86957850d3f3e98447601b080e706d3e" exitCode=0 Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.174987 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t88lc" event={"ID":"91353bbb-798f-47cc-96b2-0dfeee2938f0","Type":"ContainerDied","Data":"7caec6b8db7951c90944aeffcf1711aa86957850d3f3e98447601b080e706d3e"} Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.212791 5010 generic.go:334] "Generic (PLEG): container finished" podID="b0730a77-df20-4d33-abd6-22de117337c3" containerID="a6609f100c345eb7e2e7b237cc57571bbd40caa0b5e3a6c9308cb997093cdc52" exitCode=0 Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.212939 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerDied","Data":"a6609f100c345eb7e2e7b237cc57571bbd40caa0b5e3a6c9308cb997093cdc52"} Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.215871 5010 generic.go:334] "Generic (PLEG): container finished" podID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerID="752a54f60ee6c1c44082e68ad2cd36b78ba2792eecb0950aaf8eabd16dab631d" exitCode=0 Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.215951 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfxxn" event={"ID":"a358d6b9-52e1-4088-9141-44059aa6e3af","Type":"ContainerDied","Data":"752a54f60ee6c1c44082e68ad2cd36b78ba2792eecb0950aaf8eabd16dab631d"} Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.219894 5010 generic.go:334] "Generic (PLEG): container finished" podID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerID="e40597ca8638b3e6499bff41f02c59fed163848fa4567f71a3a00c3797bb197f" exitCode=0 Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.219920 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7jk7d" event={"ID":"5ca95312-780d-4552-9833-1ef36dd5d15d","Type":"ContainerDied","Data":"e40597ca8638b3e6499bff41f02c59fed163848fa4567f71a3a00c3797bb197f"} Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.334111 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.362975 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-catalog-content\") pod \"b0730a77-df20-4d33-abd6-22de117337c3\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.363131 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmgfh\" (UniqueName: \"kubernetes.io/projected/b0730a77-df20-4d33-abd6-22de117337c3-kube-api-access-gmgfh\") pod \"b0730a77-df20-4d33-abd6-22de117337c3\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.363167 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-utilities\") pod \"b0730a77-df20-4d33-abd6-22de117337c3\" (UID: \"b0730a77-df20-4d33-abd6-22de117337c3\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.364413 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-utilities" (OuterVolumeSpecName: "utilities") pod "b0730a77-df20-4d33-abd6-22de117337c3" (UID: "b0730a77-df20-4d33-abd6-22de117337c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.374857 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0730a77-df20-4d33-abd6-22de117337c3-kube-api-access-gmgfh" (OuterVolumeSpecName: "kube-api-access-gmgfh") pod "b0730a77-df20-4d33-abd6-22de117337c3" (UID: "b0730a77-df20-4d33-abd6-22de117337c3"). InnerVolumeSpecName "kube-api-access-gmgfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.430226 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0730a77-df20-4d33-abd6-22de117337c3" (UID: "b0730a77-df20-4d33-abd6-22de117337c3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.454236 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.488247 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlf58\" (UniqueName: \"kubernetes.io/projected/a358d6b9-52e1-4088-9141-44059aa6e3af-kube-api-access-wlf58\") pod \"a358d6b9-52e1-4088-9141-44059aa6e3af\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.488775 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-catalog-content\") pod \"a358d6b9-52e1-4088-9141-44059aa6e3af\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.488976 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-utilities\") pod \"a358d6b9-52e1-4088-9141-44059aa6e3af\" (UID: \"a358d6b9-52e1-4088-9141-44059aa6e3af\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.490085 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmgfh\" (UniqueName: \"kubernetes.io/projected/b0730a77-df20-4d33-abd6-22de117337c3-kube-api-access-gmgfh\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.490134 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.490147 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0730a77-df20-4d33-abd6-22de117337c3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.490459 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-utilities" (OuterVolumeSpecName: "utilities") pod "a358d6b9-52e1-4088-9141-44059aa6e3af" (UID: "a358d6b9-52e1-4088-9141-44059aa6e3af"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.510407 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a358d6b9-52e1-4088-9141-44059aa6e3af-kube-api-access-wlf58" (OuterVolumeSpecName: "kube-api-access-wlf58") pod "a358d6b9-52e1-4088-9141-44059aa6e3af" (UID: "a358d6b9-52e1-4088-9141-44059aa6e3af"). InnerVolumeSpecName "kube-api-access-wlf58". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.544838 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a358d6b9-52e1-4088-9141-44059aa6e3af" (UID: "a358d6b9-52e1-4088-9141-44059aa6e3af"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.567012 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.574969 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.591037 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.591212 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlf58\" (UniqueName: \"kubernetes.io/projected/a358d6b9-52e1-4088-9141-44059aa6e3af-kube-api-access-wlf58\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.591274 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a358d6b9-52e1-4088-9141-44059aa6e3af-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.619520 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.692291 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-utilities\") pod \"91353bbb-798f-47cc-96b2-0dfeee2938f0\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.692720 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-utilities\") pod \"5ca95312-780d-4552-9833-1ef36dd5d15d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.692851 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-operator-metrics\") pod \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.692937 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7qb5\" (UniqueName: \"kubernetes.io/projected/7e26d790-6dd2-4e6e-8e21-8b791f39744e-kube-api-access-c7qb5\") pod \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.693033 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4npnx\" (UniqueName: \"kubernetes.io/projected/5ca95312-780d-4552-9833-1ef36dd5d15d-kube-api-access-4npnx\") pod \"5ca95312-780d-4552-9833-1ef36dd5d15d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.693114 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77n5g\" (UniqueName: \"kubernetes.io/projected/91353bbb-798f-47cc-96b2-0dfeee2938f0-kube-api-access-77n5g\") pod \"91353bbb-798f-47cc-96b2-0dfeee2938f0\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.693265 5010 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-catalog-content\") pod \"91353bbb-798f-47cc-96b2-0dfeee2938f0\" (UID: \"91353bbb-798f-47cc-96b2-0dfeee2938f0\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.693381 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-catalog-content\") pod \"5ca95312-780d-4552-9833-1ef36dd5d15d\" (UID: \"5ca95312-780d-4552-9833-1ef36dd5d15d\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.693471 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-trusted-ca\") pod \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\" (UID: \"7e26d790-6dd2-4e6e-8e21-8b791f39744e\") " Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.694973 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "7e26d790-6dd2-4e6e-8e21-8b791f39744e" (UID: "7e26d790-6dd2-4e6e-8e21-8b791f39744e"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.695823 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-utilities" (OuterVolumeSpecName: "utilities") pod "91353bbb-798f-47cc-96b2-0dfeee2938f0" (UID: "91353bbb-798f-47cc-96b2-0dfeee2938f0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.696583 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-utilities" (OuterVolumeSpecName: "utilities") pod "5ca95312-780d-4552-9833-1ef36dd5d15d" (UID: "5ca95312-780d-4552-9833-1ef36dd5d15d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.700853 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91353bbb-798f-47cc-96b2-0dfeee2938f0-kube-api-access-77n5g" (OuterVolumeSpecName: "kube-api-access-77n5g") pod "91353bbb-798f-47cc-96b2-0dfeee2938f0" (UID: "91353bbb-798f-47cc-96b2-0dfeee2938f0"). InnerVolumeSpecName "kube-api-access-77n5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.702179 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ca95312-780d-4552-9833-1ef36dd5d15d-kube-api-access-4npnx" (OuterVolumeSpecName: "kube-api-access-4npnx") pod "5ca95312-780d-4552-9833-1ef36dd5d15d" (UID: "5ca95312-780d-4552-9833-1ef36dd5d15d"). InnerVolumeSpecName "kube-api-access-4npnx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.707137 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "7e26d790-6dd2-4e6e-8e21-8b791f39744e" (UID: "7e26d790-6dd2-4e6e-8e21-8b791f39744e"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.713449 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e26d790-6dd2-4e6e-8e21-8b791f39744e-kube-api-access-c7qb5" (OuterVolumeSpecName: "kube-api-access-c7qb5") pod "7e26d790-6dd2-4e6e-8e21-8b791f39744e" (UID: "7e26d790-6dd2-4e6e-8e21-8b791f39744e"). InnerVolumeSpecName "kube-api-access-c7qb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.774596 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5ca95312-780d-4552-9833-1ef36dd5d15d" (UID: "5ca95312-780d-4552-9833-1ef36dd5d15d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.790934 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "91353bbb-798f-47cc-96b2-0dfeee2938f0" (UID: "91353bbb-798f-47cc-96b2-0dfeee2938f0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.795605 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.795719 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.795798 5010 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.795879 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/91353bbb-798f-47cc-96b2-0dfeee2938f0-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.795940 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5ca95312-780d-4552-9833-1ef36dd5d15d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.796010 5010 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/7e26d790-6dd2-4e6e-8e21-8b791f39744e-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.796067 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7qb5\" (UniqueName: \"kubernetes.io/projected/7e26d790-6dd2-4e6e-8e21-8b791f39744e-kube-api-access-c7qb5\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.796130 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4npnx\" (UniqueName: \"kubernetes.io/projected/5ca95312-780d-4552-9833-1ef36dd5d15d-kube-api-access-4npnx\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:52 crc kubenswrapper[5010]: I1126 15:30:52.796185 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77n5g\" (UniqueName: \"kubernetes.io/projected/91353bbb-798f-47cc-96b2-0dfeee2938f0-kube-api-access-77n5g\") on node \"crc\" DevicePath \"\"" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.231551 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-t88lc" event={"ID":"91353bbb-798f-47cc-96b2-0dfeee2938f0","Type":"ContainerDied","Data":"f479abaafe10b7d57ee684a9ad2332d02b68474fc059294b931e9fae3b5d410d"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.231640 5010 scope.go:117] "RemoveContainer" containerID="7caec6b8db7951c90944aeffcf1711aa86957850d3f3e98447601b080e706d3e" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.231859 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-t88lc" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.234786 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" event={"ID":"7ade2b88-da36-4267-a6b2-f6917eaaca43","Type":"ContainerStarted","Data":"32e508a55537e5d1e6124662c0e1be78de079c91c412024ae0d21be0b6d81334"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.234835 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" event={"ID":"7ade2b88-da36-4267-a6b2-f6917eaaca43","Type":"ContainerStarted","Data":"305986a4b83cee9101f66dde72f28491d175e22ff233ac8734f721216d4f442e"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.235429 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.238866 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.241160 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hb82b" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.241158 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hb82b" event={"ID":"b0730a77-df20-4d33-abd6-22de117337c3","Type":"ContainerDied","Data":"0a03f8e117a1a99eb1be0f45b46f39932a067742af98f94a34ebaaf5d3a9b873"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.244776 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kfxxn" event={"ID":"a358d6b9-52e1-4088-9141-44059aa6e3af","Type":"ContainerDied","Data":"2c632f0c0d577a5b0db64ab0e842be8184795ba69f36b1c7fe149c9b50f5bff9"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.244922 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kfxxn" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.248165 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7jk7d" event={"ID":"5ca95312-780d-4552-9833-1ef36dd5d15d","Type":"ContainerDied","Data":"1a22c9f34f8766bd0331924f7124cbfd5431028f19408e01b820671be9503998"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.248283 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7jk7d" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.249834 5010 scope.go:117] "RemoveContainer" containerID="06e856e6684e6f3d59516b6b88d38dcf939a0793cea8d83874327cd7cd151786" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.254740 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" event={"ID":"7e26d790-6dd2-4e6e-8e21-8b791f39744e","Type":"ContainerDied","Data":"d0651332f018ba1df07392f32ba6557ada76566ccc7718be65a99273e2530240"} Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.254917 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mr9qp" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.258671 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-z7kzh" podStartSLOduration=2.258632688 podStartE2EDuration="2.258632688s" podCreationTimestamp="2025-11-26 15:30:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:30:53.256598457 +0000 UTC m=+274.047315615" watchObservedRunningTime="2025-11-26 15:30:53.258632688 +0000 UTC m=+274.049349836" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.276552 5010 scope.go:117] "RemoveContainer" containerID="0d698fadb22a53d7e7b37408f09489ca97e77dabcc1a37c540ad81c66a9d777e" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.337854 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfxxn"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.339548 5010 scope.go:117] "RemoveContainer" containerID="a6609f100c345eb7e2e7b237cc57571bbd40caa0b5e3a6c9308cb997093cdc52" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.346241 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kfxxn"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.358314 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-t88lc"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.360552 5010 scope.go:117] "RemoveContainer" containerID="fecd605a773056b5753a0e42ce1da4c304a070543a4b4198b3677a9abf0447a7" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.361674 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-t88lc"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.381945 5010 scope.go:117] "RemoveContainer" containerID="c7c7c348dfa4fdb35cdc0a3f012d9ec5bc5b45f1d41e5d5dbe7e8ed473cb7df7" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.382153 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hb82b"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.385839 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hb82b"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.388354 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mr9qp"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.390737 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mr9qp"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.393051 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7jk7d"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.395137 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7jk7d"] Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.398254 5010 scope.go:117] "RemoveContainer" containerID="752a54f60ee6c1c44082e68ad2cd36b78ba2792eecb0950aaf8eabd16dab631d" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.423572 5010 scope.go:117] "RemoveContainer" containerID="3d50a99f82dec924dfa5ef66844bcde9d8aaaa7a4ad3015a32ec4cac8c0c220e" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 
15:30:53.441270 5010 scope.go:117] "RemoveContainer" containerID="1d7b1afb522b768f7cf5f164b80a53bc6b6d6f521565b82f3600c0dde0f25adb" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.454440 5010 scope.go:117] "RemoveContainer" containerID="e40597ca8638b3e6499bff41f02c59fed163848fa4567f71a3a00c3797bb197f" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.469023 5010 scope.go:117] "RemoveContainer" containerID="e2706810f9dbd4df6f831e819f4e786e9cb2a95362ce590ed1ddb0330398c641" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.485123 5010 scope.go:117] "RemoveContainer" containerID="eec43d6ee694e5ec0c4fa04eae693d1ecee499683bf6a8d2a7d6608202fa87ca" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.500866 5010 scope.go:117] "RemoveContainer" containerID="da7466b664f286bad12abd26f9b06c34b6f1c06bbd4bd53154fca4cec40748e9" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.886664 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-pn4nw"] Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887519 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887548 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887566 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887578 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887591 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887601 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887619 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887628 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887639 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887647 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887658 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887668 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887681 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887690 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887702 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887733 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="extract-content" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887745 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887756 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887773 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerName="marketplace-operator" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887784 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerName="marketplace-operator" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887799 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887808 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887820 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887829 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="extract-utilities" Nov 26 15:30:53 crc kubenswrapper[5010]: E1126 15:30:53.887844 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.887853 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.888030 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.888048 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.888065 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.888080 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" containerName="marketplace-operator" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.888089 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b0730a77-df20-4d33-abd6-22de117337c3" containerName="registry-server" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.888991 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.895178 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.917433 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-catalog-content\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.917618 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-utilities\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.917777 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zq74c\" (UniqueName: \"kubernetes.io/projected/22f55318-df99-4764-82aa-2240fea5d0ca-kube-api-access-zq74c\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.929706 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ca95312-780d-4552-9833-1ef36dd5d15d" path="/var/lib/kubelet/pods/5ca95312-780d-4552-9833-1ef36dd5d15d/volumes" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.931185 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e26d790-6dd2-4e6e-8e21-8b791f39744e" path="/var/lib/kubelet/pods/7e26d790-6dd2-4e6e-8e21-8b791f39744e/volumes" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.931876 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91353bbb-798f-47cc-96b2-0dfeee2938f0" path="/var/lib/kubelet/pods/91353bbb-798f-47cc-96b2-0dfeee2938f0/volumes" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.933286 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a358d6b9-52e1-4088-9141-44059aa6e3af" path="/var/lib/kubelet/pods/a358d6b9-52e1-4088-9141-44059aa6e3af/volumes" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.934318 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0730a77-df20-4d33-abd6-22de117337c3" path="/var/lib/kubelet/pods/b0730a77-df20-4d33-abd6-22de117337c3/volumes" Nov 26 15:30:53 crc kubenswrapper[5010]: I1126 15:30:53.935368 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pn4nw"] Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.019093 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-utilities\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " 
pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.019198 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zq74c\" (UniqueName: \"kubernetes.io/projected/22f55318-df99-4764-82aa-2240fea5d0ca-kube-api-access-zq74c\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.019272 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-catalog-content\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.020028 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-catalog-content\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.020021 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-utilities\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.047758 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zq74c\" (UniqueName: \"kubernetes.io/projected/22f55318-df99-4764-82aa-2240fea5d0ca-kube-api-access-zq74c\") pod \"certified-operators-pn4nw\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.227156 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:30:54 crc kubenswrapper[5010]: I1126 15:30:54.692397 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-pn4nw"] Nov 26 15:30:54 crc kubenswrapper[5010]: W1126 15:30:54.705832 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22f55318_df99_4764_82aa_2240fea5d0ca.slice/crio-7c5132ca3d8968373b65bafb1e9ae6ad1e8f8ccda975d5afa499f61ac81ba390 WatchSource:0}: Error finding container 7c5132ca3d8968373b65bafb1e9ae6ad1e8f8ccda975d5afa499f61ac81ba390: Status 404 returned error can't find the container with id 7c5132ca3d8968373b65bafb1e9ae6ad1e8f8ccda975d5afa499f61ac81ba390 Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.277520 5010 generic.go:334] "Generic (PLEG): container finished" podID="22f55318-df99-4764-82aa-2240fea5d0ca" containerID="b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a" exitCode=0 Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.277619 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pn4nw" event={"ID":"22f55318-df99-4764-82aa-2240fea5d0ca","Type":"ContainerDied","Data":"b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a"} Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.278082 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pn4nw" event={"ID":"22f55318-df99-4764-82aa-2240fea5d0ca","Type":"ContainerStarted","Data":"7c5132ca3d8968373b65bafb1e9ae6ad1e8f8ccda975d5afa499f61ac81ba390"} Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.702900 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-th8t9"] Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.708333 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.711428 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.712420 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-th8t9"] Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.747963 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr6pd\" (UniqueName: \"kubernetes.io/projected/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-kube-api-access-pr6pd\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.748099 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-utilities\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.748139 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-catalog-content\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.849442 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-utilities\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.849513 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-catalog-content\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.849621 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr6pd\" (UniqueName: \"kubernetes.io/projected/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-kube-api-access-pr6pd\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.850289 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-utilities\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.850309 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-catalog-content\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " 
pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:55 crc kubenswrapper[5010]: I1126 15:30:55.875279 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr6pd\" (UniqueName: \"kubernetes.io/projected/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-kube-api-access-pr6pd\") pod \"redhat-operators-th8t9\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.063241 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.291066 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mqwcz"] Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.293996 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.296758 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.301322 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mqwcz"] Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.458850 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-catalog-content\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.458892 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vzww\" (UniqueName: \"kubernetes.io/projected/90967318-80f9-4d7a-81f5-78978bc25ab8-kube-api-access-7vzww\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.458915 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-utilities\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.484756 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-th8t9"] Nov 26 15:30:56 crc kubenswrapper[5010]: W1126 15:30:56.494331 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8a6e2eaf_7fdd_4dd0_96ef_0f4b026e0aec.slice/crio-061a907c809eb8abed368ca02ac82f51d0bb13e650a69930bbdccfdc8d85a617 WatchSource:0}: Error finding container 061a907c809eb8abed368ca02ac82f51d0bb13e650a69930bbdccfdc8d85a617: Status 404 returned error can't find the container with id 061a907c809eb8abed368ca02ac82f51d0bb13e650a69930bbdccfdc8d85a617 Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.560636 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-catalog-content\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.561289 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vzww\" (UniqueName: \"kubernetes.io/projected/90967318-80f9-4d7a-81f5-78978bc25ab8-kube-api-access-7vzww\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.561318 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-utilities\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.561873 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-utilities\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.563156 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-catalog-content\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.587503 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vzww\" (UniqueName: \"kubernetes.io/projected/90967318-80f9-4d7a-81f5-78978bc25ab8-kube-api-access-7vzww\") pod \"community-operators-mqwcz\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:56 crc kubenswrapper[5010]: I1126 15:30:56.615226 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.035929 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mqwcz"] Nov 26 15:30:57 crc kubenswrapper[5010]: W1126 15:30:57.042267 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90967318_80f9_4d7a_81f5_78978bc25ab8.slice/crio-0fc226de84263ab753dfab427261a9a7e4b254805844730d3022fe3e37171797 WatchSource:0}: Error finding container 0fc226de84263ab753dfab427261a9a7e4b254805844730d3022fe3e37171797: Status 404 returned error can't find the container with id 0fc226de84263ab753dfab427261a9a7e4b254805844730d3022fe3e37171797 Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.300906 5010 generic.go:334] "Generic (PLEG): container finished" podID="22f55318-df99-4764-82aa-2240fea5d0ca" containerID="35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c" exitCode=0 Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.300989 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pn4nw" event={"ID":"22f55318-df99-4764-82aa-2240fea5d0ca","Type":"ContainerDied","Data":"35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c"} Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.317113 5010 generic.go:334] "Generic (PLEG): container finished" podID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerID="a47fba56f4f89d049a9c9942ba981bcb043b0ce77b2c8bfcaf81126f9ff6be1b" exitCode=0 Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.317383 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerDied","Data":"a47fba56f4f89d049a9c9942ba981bcb043b0ce77b2c8bfcaf81126f9ff6be1b"} Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.317590 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerStarted","Data":"0fc226de84263ab753dfab427261a9a7e4b254805844730d3022fe3e37171797"} Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.323842 5010 generic.go:334] "Generic (PLEG): container finished" podID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerID="1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee" exitCode=0 Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.323888 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-th8t9" event={"ID":"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec","Type":"ContainerDied","Data":"1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee"} Nov 26 15:30:57 crc kubenswrapper[5010]: I1126 15:30:57.323922 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-th8t9" event={"ID":"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec","Type":"ContainerStarted","Data":"061a907c809eb8abed368ca02ac82f51d0bb13e650a69930bbdccfdc8d85a617"} Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.092100 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-g5kfv"] Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.098231 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.100353 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g5kfv"] Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.103336 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.286579 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a73ed2-ad4b-4ebc-882c-7564f81058a5-catalog-content\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.286645 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a73ed2-ad4b-4ebc-882c-7564f81058a5-utilities\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.287349 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gmv2\" (UniqueName: \"kubernetes.io/projected/55a73ed2-ad4b-4ebc-882c-7564f81058a5-kube-api-access-7gmv2\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.332246 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pn4nw" event={"ID":"22f55318-df99-4764-82aa-2240fea5d0ca","Type":"ContainerStarted","Data":"9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677"} Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.334637 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerStarted","Data":"76d8ddb9385df6e6a13e2ee4a4de5037115eeb9aa34d8ec021d6534ece00a5c8"} Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.352855 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-pn4nw" podStartSLOduration=2.7442637579999998 podStartE2EDuration="5.352832441s" podCreationTimestamp="2025-11-26 15:30:53 +0000 UTC" firstStartedPulling="2025-11-26 15:30:55.2814912 +0000 UTC m=+276.072208348" lastFinishedPulling="2025-11-26 15:30:57.890059883 +0000 UTC m=+278.680777031" observedRunningTime="2025-11-26 15:30:58.352499663 +0000 UTC m=+279.143216811" watchObservedRunningTime="2025-11-26 15:30:58.352832441 +0000 UTC m=+279.143549599" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.388389 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a73ed2-ad4b-4ebc-882c-7564f81058a5-catalog-content\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.388435 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/55a73ed2-ad4b-4ebc-882c-7564f81058a5-utilities\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.388464 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gmv2\" (UniqueName: \"kubernetes.io/projected/55a73ed2-ad4b-4ebc-882c-7564f81058a5-kube-api-access-7gmv2\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.391186 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/55a73ed2-ad4b-4ebc-882c-7564f81058a5-utilities\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.391204 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/55a73ed2-ad4b-4ebc-882c-7564f81058a5-catalog-content\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.409586 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gmv2\" (UniqueName: \"kubernetes.io/projected/55a73ed2-ad4b-4ebc-882c-7564f81058a5-kube-api-access-7gmv2\") pod \"redhat-marketplace-g5kfv\" (UID: \"55a73ed2-ad4b-4ebc-882c-7564f81058a5\") " pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.467703 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:30:58 crc kubenswrapper[5010]: I1126 15:30:58.882023 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-g5kfv"] Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.342121 5010 generic.go:334] "Generic (PLEG): container finished" podID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerID="13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff" exitCode=0 Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.342380 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-th8t9" event={"ID":"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec","Type":"ContainerDied","Data":"13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff"} Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.349864 5010 generic.go:334] "Generic (PLEG): container finished" podID="55a73ed2-ad4b-4ebc-882c-7564f81058a5" containerID="494ddf93eca4e70ad416b00fad92c649ddb6c69d67e5eef68377c8069ce2c1d2" exitCode=0 Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.349917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g5kfv" event={"ID":"55a73ed2-ad4b-4ebc-882c-7564f81058a5","Type":"ContainerDied","Data":"494ddf93eca4e70ad416b00fad92c649ddb6c69d67e5eef68377c8069ce2c1d2"} Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.350005 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g5kfv" event={"ID":"55a73ed2-ad4b-4ebc-882c-7564f81058a5","Type":"ContainerStarted","Data":"a0004d5da8c796b93bb33a04e91adb14d2d57de80d0705ebc8dd700250850ec5"} Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.354391 5010 generic.go:334] "Generic (PLEG): container finished" podID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerID="76d8ddb9385df6e6a13e2ee4a4de5037115eeb9aa34d8ec021d6534ece00a5c8" exitCode=0 Nov 26 15:30:59 crc kubenswrapper[5010]: I1126 15:30:59.354539 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerDied","Data":"76d8ddb9385df6e6a13e2ee4a4de5037115eeb9aa34d8ec021d6534ece00a5c8"} Nov 26 15:31:00 crc kubenswrapper[5010]: I1126 15:31:00.362345 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerStarted","Data":"4cdcadc4c51fdcb805528a5f52d9bc52363241f36af0106bb2cee4bae95ceb61"} Nov 26 15:31:00 crc kubenswrapper[5010]: I1126 15:31:00.372248 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-th8t9" event={"ID":"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec","Type":"ContainerStarted","Data":"843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325"} Nov 26 15:31:00 crc kubenswrapper[5010]: I1126 15:31:00.383060 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mqwcz" podStartSLOduration=1.679475372 podStartE2EDuration="4.38303773s" podCreationTimestamp="2025-11-26 15:30:56 +0000 UTC" firstStartedPulling="2025-11-26 15:30:57.320842419 +0000 UTC m=+278.111559587" lastFinishedPulling="2025-11-26 15:31:00.024404797 +0000 UTC m=+280.815121945" observedRunningTime="2025-11-26 15:31:00.382197749 +0000 UTC m=+281.172914917" watchObservedRunningTime="2025-11-26 15:31:00.38303773 +0000 
UTC m=+281.173754878" Nov 26 15:31:00 crc kubenswrapper[5010]: I1126 15:31:00.399114 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-th8t9" podStartSLOduration=2.800182375 podStartE2EDuration="5.399091123s" podCreationTimestamp="2025-11-26 15:30:55 +0000 UTC" firstStartedPulling="2025-11-26 15:30:57.326168223 +0000 UTC m=+278.116885391" lastFinishedPulling="2025-11-26 15:30:59.925076961 +0000 UTC m=+280.715794139" observedRunningTime="2025-11-26 15:31:00.398191151 +0000 UTC m=+281.188908309" watchObservedRunningTime="2025-11-26 15:31:00.399091123 +0000 UTC m=+281.189808271" Nov 26 15:31:02 crc kubenswrapper[5010]: I1126 15:31:02.390882 5010 generic.go:334] "Generic (PLEG): container finished" podID="55a73ed2-ad4b-4ebc-882c-7564f81058a5" containerID="1c48a6884fefbf98b9eebff17b91949bc190fdcfdb85fa20a02d7bfcffd70224" exitCode=0 Nov 26 15:31:02 crc kubenswrapper[5010]: I1126 15:31:02.391046 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g5kfv" event={"ID":"55a73ed2-ad4b-4ebc-882c-7564f81058a5","Type":"ContainerDied","Data":"1c48a6884fefbf98b9eebff17b91949bc190fdcfdb85fa20a02d7bfcffd70224"} Nov 26 15:31:03 crc kubenswrapper[5010]: I1126 15:31:03.400608 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-g5kfv" event={"ID":"55a73ed2-ad4b-4ebc-882c-7564f81058a5","Type":"ContainerStarted","Data":"5b998d12f6def036b7979460277024bedfe59cca0f600180cc01c78de5f3bc31"} Nov 26 15:31:03 crc kubenswrapper[5010]: I1126 15:31:03.431881 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-g5kfv" podStartSLOduration=1.72808574 podStartE2EDuration="5.431853114s" podCreationTimestamp="2025-11-26 15:30:58 +0000 UTC" firstStartedPulling="2025-11-26 15:30:59.351416575 +0000 UTC m=+280.142133763" lastFinishedPulling="2025-11-26 15:31:03.055183989 +0000 UTC m=+283.845901137" observedRunningTime="2025-11-26 15:31:03.427176507 +0000 UTC m=+284.217893645" watchObservedRunningTime="2025-11-26 15:31:03.431853114 +0000 UTC m=+284.222570282" Nov 26 15:31:04 crc kubenswrapper[5010]: I1126 15:31:04.228126 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:31:04 crc kubenswrapper[5010]: I1126 15:31:04.228201 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:31:04 crc kubenswrapper[5010]: I1126 15:31:04.306527 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:31:04 crc kubenswrapper[5010]: I1126 15:31:04.460117 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 15:31:06.064818 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 15:31:06.065480 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 15:31:06.129544 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 
15:31:06.483593 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 15:31:06.624538 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 15:31:06.624946 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:31:06 crc kubenswrapper[5010]: I1126 15:31:06.689910 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:31:07 crc kubenswrapper[5010]: I1126 15:31:07.473955 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 15:31:08 crc kubenswrapper[5010]: I1126 15:31:08.468100 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:31:08 crc kubenswrapper[5010]: I1126 15:31:08.468489 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:31:08 crc kubenswrapper[5010]: I1126 15:31:08.523271 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:31:09 crc kubenswrapper[5010]: I1126 15:31:09.473360 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-g5kfv" Nov 26 15:31:41 crc kubenswrapper[5010]: I1126 15:31:41.423406 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:31:41 crc kubenswrapper[5010]: I1126 15:31:41.424336 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:32:11 crc kubenswrapper[5010]: I1126 15:32:11.423197 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:32:11 crc kubenswrapper[5010]: I1126 15:32:11.423836 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:32:41 crc kubenswrapper[5010]: I1126 15:32:41.423372 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:32:41 crc kubenswrapper[5010]: I1126 
15:32:41.424103 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:32:41 crc kubenswrapper[5010]: I1126 15:32:41.424172 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:32:41 crc kubenswrapper[5010]: I1126 15:32:41.424988 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1650b5fcdb5fec219e6ddc5a70f9c5a7048a441e9afd0f1a5126d9bea6739360"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:32:41 crc kubenswrapper[5010]: I1126 15:32:41.425077 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://1650b5fcdb5fec219e6ddc5a70f9c5a7048a441e9afd0f1a5126d9bea6739360" gracePeriod=600 Nov 26 15:32:42 crc kubenswrapper[5010]: I1126 15:32:42.119113 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="1650b5fcdb5fec219e6ddc5a70f9c5a7048a441e9afd0f1a5126d9bea6739360" exitCode=0 Nov 26 15:32:42 crc kubenswrapper[5010]: I1126 15:32:42.119247 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"1650b5fcdb5fec219e6ddc5a70f9c5a7048a441e9afd0f1a5126d9bea6739360"} Nov 26 15:32:42 crc kubenswrapper[5010]: I1126 15:32:42.119799 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"cb7781e50e97e8aaffc05be4d03baf866d7e4a9a0d796b777de62a3d4894cdb4"} Nov 26 15:32:42 crc kubenswrapper[5010]: I1126 15:32:42.119838 5010 scope.go:117] "RemoveContainer" containerID="851c1431fd36851973090520277e1f2774b084a7186fdde0290d0ef380e44b16" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.549732 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-2zjp4"] Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.551660 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.573562 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-2zjp4"] Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680428 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/aa06b927-af49-4b82-9ee9-02b40eb7a992-registry-certificates\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680492 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-registry-tls\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680576 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680626 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/aa06b927-af49-4b82-9ee9-02b40eb7a992-ca-trust-extracted\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680680 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aa06b927-af49-4b82-9ee9-02b40eb7a992-trusted-ca\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680731 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-bound-sa-token\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680757 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2gx9\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-kube-api-access-b2gx9\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.680911 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/aa06b927-af49-4b82-9ee9-02b40eb7a992-installation-pull-secrets\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.717307 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782230 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-bound-sa-token\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782270 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2gx9\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-kube-api-access-b2gx9\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782294 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/aa06b927-af49-4b82-9ee9-02b40eb7a992-installation-pull-secrets\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782322 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/aa06b927-af49-4b82-9ee9-02b40eb7a992-registry-certificates\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782340 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-registry-tls\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782383 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/aa06b927-af49-4b82-9ee9-02b40eb7a992-ca-trust-extracted\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.782408 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aa06b927-af49-4b82-9ee9-02b40eb7a992-trusted-ca\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.783786 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/aa06b927-af49-4b82-9ee9-02b40eb7a992-ca-trust-extracted\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.783862 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/aa06b927-af49-4b82-9ee9-02b40eb7a992-trusted-ca\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.785844 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/aa06b927-af49-4b82-9ee9-02b40eb7a992-registry-certificates\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.788894 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/aa06b927-af49-4b82-9ee9-02b40eb7a992-installation-pull-secrets\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.791291 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-registry-tls\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.798476 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2gx9\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-kube-api-access-b2gx9\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.799293 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/aa06b927-af49-4b82-9ee9-02b40eb7a992-bound-sa-token\") pod \"image-registry-66df7c8f76-2zjp4\" (UID: \"aa06b927-af49-4b82-9ee9-02b40eb7a992\") " pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:54 crc kubenswrapper[5010]: I1126 15:32:54.871827 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:55 crc kubenswrapper[5010]: I1126 15:32:55.148323 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-2zjp4"] Nov 26 15:32:55 crc kubenswrapper[5010]: W1126 15:32:55.155803 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa06b927_af49_4b82_9ee9_02b40eb7a992.slice/crio-454c954f01e49a76c912fc0da2f6a29be9803fc71ea2d6fe3ca389939b15de17 WatchSource:0}: Error finding container 454c954f01e49a76c912fc0da2f6a29be9803fc71ea2d6fe3ca389939b15de17: Status 404 returned error can't find the container with id 454c954f01e49a76c912fc0da2f6a29be9803fc71ea2d6fe3ca389939b15de17 Nov 26 15:32:55 crc kubenswrapper[5010]: I1126 15:32:55.216608 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" event={"ID":"aa06b927-af49-4b82-9ee9-02b40eb7a992","Type":"ContainerStarted","Data":"454c954f01e49a76c912fc0da2f6a29be9803fc71ea2d6fe3ca389939b15de17"} Nov 26 15:32:56 crc kubenswrapper[5010]: I1126 15:32:56.226134 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" event={"ID":"aa06b927-af49-4b82-9ee9-02b40eb7a992","Type":"ContainerStarted","Data":"574dd4191cdaa16be32fcf0dad697781e089a05003911d8b87c86a72d7dc2c4a"} Nov 26 15:32:56 crc kubenswrapper[5010]: I1126 15:32:56.226638 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:32:56 crc kubenswrapper[5010]: I1126 15:32:56.258081 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" podStartSLOduration=2.258056572 podStartE2EDuration="2.258056572s" podCreationTimestamp="2025-11-26 15:32:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:32:56.249095591 +0000 UTC m=+397.039812799" watchObservedRunningTime="2025-11-26 15:32:56.258056572 +0000 UTC m=+397.048773720" Nov 26 15:33:14 crc kubenswrapper[5010]: I1126 15:33:14.881902 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-2zjp4" Nov 26 15:33:14 crc kubenswrapper[5010]: I1126 15:33:14.991800 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7hblv"] Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.057460 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" podUID="9986e410-984a-466f-bb26-b1644bc6c976" containerName="registry" containerID="cri-o://629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37" gracePeriod=30 Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.508301 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543041 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-registry-certificates\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543090 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-trusted-ca\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543124 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-bound-sa-token\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543147 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9986e410-984a-466f-bb26-b1644bc6c976-ca-trust-extracted\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543271 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543301 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-79qbg\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-kube-api-access-79qbg\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543322 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9986e410-984a-466f-bb26-b1644bc6c976-installation-pull-secrets\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.543351 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-registry-tls\") pod \"9986e410-984a-466f-bb26-b1644bc6c976\" (UID: \"9986e410-984a-466f-bb26-b1644bc6c976\") " Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.544506 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.545075 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.552057 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9986e410-984a-466f-bb26-b1644bc6c976-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.552090 5010 generic.go:334] "Generic (PLEG): container finished" podID="9986e410-984a-466f-bb26-b1644bc6c976" containerID="629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37" exitCode=0 Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.552134 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" event={"ID":"9986e410-984a-466f-bb26-b1644bc6c976","Type":"ContainerDied","Data":"629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37"} Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.552159 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.552185 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" event={"ID":"9986e410-984a-466f-bb26-b1644bc6c976","Type":"ContainerDied","Data":"f3877539c886cf0ac476a94581889aea99892c90c79e412a27ad415e4f9e65b1"} Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.552211 5010 scope.go:117] "RemoveContainer" containerID="629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.554814 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.555319 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-kube-api-access-79qbg" (OuterVolumeSpecName: "kube-api-access-79qbg") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "kube-api-access-79qbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.558105 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). 
InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.560216 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.571545 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9986e410-984a-466f-bb26-b1644bc6c976-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "9986e410-984a-466f-bb26-b1644bc6c976" (UID: "9986e410-984a-466f-bb26-b1644bc6c976"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.613295 5010 scope.go:117] "RemoveContainer" containerID="629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37" Nov 26 15:33:40 crc kubenswrapper[5010]: E1126 15:33:40.613971 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37\": container with ID starting with 629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37 not found: ID does not exist" containerID="629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.614049 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37"} err="failed to get container status \"629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37\": rpc error: code = NotFound desc = could not find container \"629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37\": container with ID starting with 629460f49d99760623033df657be729e071fb1f9f30639941462d7dccc1a9c37 not found: ID does not exist" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645682 5010 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645759 5010 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/9986e410-984a-466f-bb26-b1644bc6c976-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645781 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-79qbg\" (UniqueName: \"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-kube-api-access-79qbg\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645803 5010 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/9986e410-984a-466f-bb26-b1644bc6c976-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645823 5010 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/9986e410-984a-466f-bb26-b1644bc6c976-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645844 5010 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.645862 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9986e410-984a-466f-bb26-b1644bc6c976-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.915373 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7hblv"] Nov 26 15:33:40 crc kubenswrapper[5010]: I1126 15:33:40.920592 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-7hblv"] Nov 26 15:33:41 crc kubenswrapper[5010]: I1126 15:33:41.907785 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9986e410-984a-466f-bb26-b1644bc6c976" path="/var/lib/kubelet/pods/9986e410-984a-466f-bb26-b1644bc6c976/volumes" Nov 26 15:33:45 crc kubenswrapper[5010]: I1126 15:33:45.498940 5010 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-7hblv container/registry namespace/openshift-image-registry: Readiness probe status=failure output="Get \"https://10.217.0.31:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 15:33:45 crc kubenswrapper[5010]: I1126 15:33:45.499090 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-7hblv" podUID="9986e410-984a-466f-bb26-b1644bc6c976" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.31:5000/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 15:34:41 crc kubenswrapper[5010]: I1126 15:34:41.423170 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:34:41 crc kubenswrapper[5010]: I1126 15:34:41.424588 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:35:11 crc kubenswrapper[5010]: I1126 15:35:11.423117 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:35:11 crc kubenswrapper[5010]: I1126 15:35:11.424284 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Nov 26 15:35:41 crc kubenswrapper[5010]: I1126 15:35:41.423230 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:35:41 crc kubenswrapper[5010]: I1126 15:35:41.423770 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:35:41 crc kubenswrapper[5010]: I1126 15:35:41.423819 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:35:41 crc kubenswrapper[5010]: I1126 15:35:41.424441 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cb7781e50e97e8aaffc05be4d03baf866d7e4a9a0d796b777de62a3d4894cdb4"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:35:41 crc kubenswrapper[5010]: I1126 15:35:41.424509 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://cb7781e50e97e8aaffc05be4d03baf866d7e4a9a0d796b777de62a3d4894cdb4" gracePeriod=600 Nov 26 15:35:42 crc kubenswrapper[5010]: I1126 15:35:42.457431 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="cb7781e50e97e8aaffc05be4d03baf866d7e4a9a0d796b777de62a3d4894cdb4" exitCode=0 Nov 26 15:35:42 crc kubenswrapper[5010]: I1126 15:35:42.457529 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"cb7781e50e97e8aaffc05be4d03baf866d7e4a9a0d796b777de62a3d4894cdb4"} Nov 26 15:35:42 crc kubenswrapper[5010]: I1126 15:35:42.457884 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"07de4390fc3c8495bcdc1f46830e2b986f1ed25110c72eb2d6d31304d8ef46ee"} Nov 26 15:35:42 crc kubenswrapper[5010]: I1126 15:35:42.457908 5010 scope.go:117] "RemoveContainer" containerID="1650b5fcdb5fec219e6ddc5a70f9c5a7048a441e9afd0f1a5126d9bea6739360" Nov 26 15:37:41 crc kubenswrapper[5010]: I1126 15:37:41.423329 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:37:41 crc kubenswrapper[5010]: I1126 15:37:41.424099 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.371759 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hlqt9"] Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372477 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-controller" containerID="cri-o://ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372538 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="sbdb" containerID="cri-o://928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372595 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="nbdb" containerID="cri-o://d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372631 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="northd" containerID="cri-o://6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372693 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-node" containerID="cri-o://37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372796 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-acl-logging" containerID="cri-o://0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.372802 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.419833 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" containerID="cri-o://612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" gracePeriod=30 Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.734814 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/3.log" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 
15:37:43.738016 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovn-acl-logging/0.log" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.738568 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovn-controller/0.log" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.739116 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798319 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hnjhm"] Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798593 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798608 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798620 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="nbdb" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798629 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="nbdb" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798643 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798652 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798667 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-acl-logging" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798675 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-acl-logging" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798687 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="northd" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798695 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="northd" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798733 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-node" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798766 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-node" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798787 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798796 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 
15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798807 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798815 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798826 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="sbdb" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798835 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="sbdb" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798847 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9986e410-984a-466f-bb26-b1644bc6c976" containerName="registry" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798855 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9986e410-984a-466f-bb26-b1644bc6c976" containerName="registry" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798864 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798872 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.798882 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kubecfg-setup" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.798890 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kubecfg-setup" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799025 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799038 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9986e410-984a-466f-bb26-b1644bc6c976" containerName="registry" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799051 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="northd" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799061 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799069 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799117 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799128 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799138 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="sbdb" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799151 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="nbdb" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799161 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="kube-rbac-proxy-node" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799178 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovn-acl-logging" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.799290 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799300 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799419 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799429 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: E1126 15:37:43.799553 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.799563 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerName="ovnkube-controller" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.801681 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842121 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-var-lib-openvswitch\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842192 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-config\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842390 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-systemd\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842440 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842462 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-bin\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842485 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-etc-openvswitch\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842507 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-netd\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842540 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-script-lib\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842566 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-openvswitch\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842653 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842697 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842741 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843008 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-env-overrides\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843310 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.842766 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843100 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843402 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-ovn\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843477 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843628 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-slash\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843675 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-node-log\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.843741 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxqgl\" (UniqueName: \"kubernetes.io/projected/f10d9600-fac2-43e9-ad75-91b3c1f5b749-kube-api-access-qxqgl\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844064 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-slash" (OuterVolumeSpecName: "host-slash") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844106 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-node-log" (OuterVolumeSpecName: "node-log") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "node-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844140 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-kubelet\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844178 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-ovn-kubernetes\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844211 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-systemd-units\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844240 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-var-lib-cni-networks-ovn-kubernetes\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844250 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844300 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844341 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844372 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844398 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844456 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovn-node-metrics-cert\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844492 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-log-socket\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844517 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-netns\") pod \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\" (UID: \"f10d9600-fac2-43e9-ad75-91b3c1f5b749\") " Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844629 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.844811 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-log-socket" (OuterVolumeSpecName: "log-socket") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845326 5010 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845399 5010 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845473 5010 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845543 5010 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845593 5010 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845625 5010 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.845667 5010 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846103 5010 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846140 5010 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846158 5010 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846187 5010 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846205 5010 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846223 5010 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-var-lib-cni-networks-ovn-kubernetes\") on node 
\"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846255 5010 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846283 5010 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846301 5010 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.846319 5010 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.849698 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.851496 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f10d9600-fac2-43e9-ad75-91b3c1f5b749-kube-api-access-qxqgl" (OuterVolumeSpecName: "kube-api-access-qxqgl") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "kube-api-access-qxqgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.859156 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "f10d9600-fac2-43e9-ad75-91b3c1f5b749" (UID: "f10d9600-fac2-43e9-ad75-91b3c1f5b749"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947541 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-etc-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947593 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovnkube-config\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947611 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovnkube-script-lib\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947634 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-node-log\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947778 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947850 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-cni-bin\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947885 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovn-node-metrics-cert\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.947977 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-slash\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948027 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948075 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-var-lib-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948119 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxcdc\" (UniqueName: \"kubernetes.io/projected/53b64570-43a9-4d16-a607-31a7b54ad8b3-kube-api-access-nxcdc\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948160 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-systemd\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948222 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-systemd-units\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948262 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-env-overrides\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948294 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-cni-netd\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948328 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-ovn\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948370 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-run-netns\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948441 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948488 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-kubelet\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948517 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-log-socket\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948608 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxqgl\" (UniqueName: \"kubernetes.io/projected/f10d9600-fac2-43e9-ad75-91b3c1f5b749-kube-api-access-qxqgl\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948621 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/f10d9600-fac2-43e9-ad75-91b3c1f5b749-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:43 crc kubenswrapper[5010]: I1126 15:37:43.948632 5010 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/f10d9600-fac2-43e9-ad75-91b3c1f5b749-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049203 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovnkube-config\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049250 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovnkube-script-lib\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049274 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-node-log\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049299 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc 
kubenswrapper[5010]: I1126 15:37:44.049319 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-cni-bin\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049337 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovn-node-metrics-cert\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049359 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-slash\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049376 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049399 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-var-lib-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049420 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxcdc\" (UniqueName: \"kubernetes.io/projected/53b64570-43a9-4d16-a607-31a7b54ad8b3-kube-api-access-nxcdc\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049425 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-run-ovn-kubernetes\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049456 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-cni-bin\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049481 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-systemd\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049440 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-systemd\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049515 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-systemd-units\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049523 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-node-log\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049554 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049559 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-slash\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049582 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-systemd-units\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049611 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-var-lib-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049616 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-cni-netd\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049635 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-cni-netd\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049663 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-env-overrides\") pod 
\"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049882 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-ovn\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049901 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-run-netns\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049932 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049949 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-kubelet\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049964 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-log-socket\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.049977 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-etc-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050029 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-etc-openvswitch\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050052 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-run-ovn\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050070 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-run-netns\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050092 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050117 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-host-kubelet\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050140 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/53b64570-43a9-4d16-a607-31a7b54ad8b3-log-socket\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050399 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovnkube-config\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050409 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovnkube-script-lib\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.050599 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/53b64570-43a9-4d16-a607-31a7b54ad8b3-env-overrides\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.053985 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/53b64570-43a9-4d16-a607-31a7b54ad8b3-ovn-node-metrics-cert\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.066301 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxcdc\" (UniqueName: \"kubernetes.io/projected/53b64570-43a9-4d16-a607-31a7b54ad8b3-kube-api-access-nxcdc\") pod \"ovnkube-node-hnjhm\" (UID: \"53b64570-43a9-4d16-a607-31a7b54ad8b3\") " pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.122569 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.372495 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/2.log" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.373020 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/1.log" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.373054 5010 generic.go:334] "Generic (PLEG): container finished" podID="0a5a476f-6c13-4c62-8042-d9b37846aa18" containerID="5358d44abff63ec38fcdcf83ef302371855bdc2ea7e63d36b38665e5a8434fdb" exitCode=2 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.373112 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerDied","Data":"5358d44abff63ec38fcdcf83ef302371855bdc2ea7e63d36b38665e5a8434fdb"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.373148 5010 scope.go:117] "RemoveContainer" containerID="fa25b382ccadfbf25811db11c3987032b0e6def2a717986cb0d6fd7999f0f67a" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.374555 5010 generic.go:334] "Generic (PLEG): container finished" podID="53b64570-43a9-4d16-a607-31a7b54ad8b3" containerID="9938cf9ffa9a2e397d6deb45ff83b2fdde184118f7237f9684c5c08c3240d0e0" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.374591 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerDied","Data":"9938cf9ffa9a2e397d6deb45ff83b2fdde184118f7237f9684c5c08c3240d0e0"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.374606 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"01b2c2abdd3a6d48cb1ab385b744c0d88bdfb8d357ed6ef357b15f4569cf1602"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.375516 5010 scope.go:117] "RemoveContainer" containerID="5358d44abff63ec38fcdcf83ef302371855bdc2ea7e63d36b38665e5a8434fdb" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.376790 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-94lzp_openshift-multus(0a5a476f-6c13-4c62-8042-d9b37846aa18)\"" pod="openshift-multus/multus-94lzp" podUID="0a5a476f-6c13-4c62-8042-d9b37846aa18" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.382167 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovnkube-controller/3.log" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.387268 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovn-acl-logging/0.log" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.387935 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hlqt9_f10d9600-fac2-43e9-ad75-91b3c1f5b749/ovn-controller/0.log" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388371 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388396 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388406 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388415 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388422 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388428 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" exitCode=0 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388435 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" exitCode=143 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388442 5010 generic.go:334] "Generic (PLEG): container finished" podID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" exitCode=143 Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388462 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388491 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388496 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388504 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388516 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388528 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388538 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388552 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388563 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388569 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388574 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388579 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388584 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388589 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388594 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388600 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388605 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388613 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388621 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388627 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388632 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388636 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388642 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388647 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388652 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388657 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388662 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388667 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388674 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388681 5010 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388687 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388693 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388699 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388800 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388809 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388814 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388820 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388824 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388829 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388838 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hlqt9" event={"ID":"f10d9600-fac2-43e9-ad75-91b3c1f5b749","Type":"ContainerDied","Data":"4552169679da420cd350889ea2b3776c357f92990caf200c15d5e729ed5c00f5"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388847 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388853 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388880 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388885 5010 
pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388890 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388896 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388901 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388906 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388912 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.388917 5010 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.404096 5010 scope.go:117] "RemoveContainer" containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.424891 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.458670 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hlqt9"] Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.464228 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hlqt9"] Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.468241 5010 scope.go:117] "RemoveContainer" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.507913 5010 scope.go:117] "RemoveContainer" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.528955 5010 scope.go:117] "RemoveContainer" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.547724 5010 scope.go:117] "RemoveContainer" containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.569868 5010 scope.go:117] "RemoveContainer" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.599532 5010 scope.go:117] "RemoveContainer" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.637100 5010 scope.go:117] "RemoveContainer" 
containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.662573 5010 scope.go:117] "RemoveContainer" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.677277 5010 scope.go:117] "RemoveContainer" containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.677935 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": container with ID starting with 612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278 not found: ID does not exist" containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.677986 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} err="failed to get container status \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": rpc error: code = NotFound desc = could not find container \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": container with ID starting with 612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.678016 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.678421 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": container with ID starting with ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab not found: ID does not exist" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.678481 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} err="failed to get container status \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": rpc error: code = NotFound desc = could not find container \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": container with ID starting with ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.678525 5010 scope.go:117] "RemoveContainer" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.679002 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": container with ID starting with 928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618 not found: ID does not exist" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.679050 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} err="failed to get container status \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": rpc error: code = NotFound desc = could not find container \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": container with ID starting with 928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.679085 5010 scope.go:117] "RemoveContainer" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.679421 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": container with ID starting with d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da not found: ID does not exist" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.679463 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} err="failed to get container status \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": rpc error: code = NotFound desc = could not find container \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": container with ID starting with d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.679498 5010 scope.go:117] "RemoveContainer" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.679910 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": container with ID starting with 6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a not found: ID does not exist" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.679958 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} err="failed to get container status \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": rpc error: code = NotFound desc = could not find container \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": container with ID starting with 6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.679986 5010 scope.go:117] "RemoveContainer" containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.680377 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": container with ID starting with 1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466 not found: ID does not exist" 
containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.680410 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} err="failed to get container status \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": rpc error: code = NotFound desc = could not find container \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": container with ID starting with 1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.680429 5010 scope.go:117] "RemoveContainer" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.680909 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": container with ID starting with 37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3 not found: ID does not exist" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.680949 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} err="failed to get container status \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": rpc error: code = NotFound desc = could not find container \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": container with ID starting with 37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.680972 5010 scope.go:117] "RemoveContainer" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.681271 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": container with ID starting with 0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604 not found: ID does not exist" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.681309 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} err="failed to get container status \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": rpc error: code = NotFound desc = could not find container \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": container with ID starting with 0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.681331 5010 scope.go:117] "RemoveContainer" containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.681831 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": container with ID starting with ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842 not found: ID does not exist" containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.681870 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} err="failed to get container status \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": rpc error: code = NotFound desc = could not find container \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": container with ID starting with ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.681898 5010 scope.go:117] "RemoveContainer" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" Nov 26 15:37:44 crc kubenswrapper[5010]: E1126 15:37:44.682246 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": container with ID starting with 4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1 not found: ID does not exist" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.682279 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} err="failed to get container status \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": rpc error: code = NotFound desc = could not find container \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": container with ID starting with 4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.682316 5010 scope.go:117] "RemoveContainer" containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.682677 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} err="failed to get container status \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": rpc error: code = NotFound desc = could not find container \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": container with ID starting with 612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.682746 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.683182 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} err="failed to get container status \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": rpc error: code = NotFound desc = could not find container \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": container with ID starting with 
ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.683223 5010 scope.go:117] "RemoveContainer" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.684201 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} err="failed to get container status \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": rpc error: code = NotFound desc = could not find container \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": container with ID starting with 928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.684257 5010 scope.go:117] "RemoveContainer" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.685979 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} err="failed to get container status \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": rpc error: code = NotFound desc = could not find container \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": container with ID starting with d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.686025 5010 scope.go:117] "RemoveContainer" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.686468 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} err="failed to get container status \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": rpc error: code = NotFound desc = could not find container \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": container with ID starting with 6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.686504 5010 scope.go:117] "RemoveContainer" containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.687018 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} err="failed to get container status \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": rpc error: code = NotFound desc = could not find container \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": container with ID starting with 1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.687061 5010 scope.go:117] "RemoveContainer" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.687381 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} err="failed to get container status \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": rpc error: code = NotFound desc = could not find container \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": container with ID starting with 37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.687426 5010 scope.go:117] "RemoveContainer" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.687905 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} err="failed to get container status \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": rpc error: code = NotFound desc = could not find container \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": container with ID starting with 0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.687948 5010 scope.go:117] "RemoveContainer" containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.688335 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} err="failed to get container status \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": rpc error: code = NotFound desc = could not find container \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": container with ID starting with ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.688373 5010 scope.go:117] "RemoveContainer" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.688785 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} err="failed to get container status \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": rpc error: code = NotFound desc = could not find container \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": container with ID starting with 4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.688840 5010 scope.go:117] "RemoveContainer" containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.689151 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} err="failed to get container status \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": rpc error: code = NotFound desc = could not find container \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": container with ID starting with 612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278 not found: ID does not exist" Nov 
26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.689180 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.690161 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} err="failed to get container status \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": rpc error: code = NotFound desc = could not find container \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": container with ID starting with ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.690200 5010 scope.go:117] "RemoveContainer" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.690527 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} err="failed to get container status \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": rpc error: code = NotFound desc = could not find container \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": container with ID starting with 928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.690549 5010 scope.go:117] "RemoveContainer" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.691121 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} err="failed to get container status \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": rpc error: code = NotFound desc = could not find container \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": container with ID starting with d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.691157 5010 scope.go:117] "RemoveContainer" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.691703 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} err="failed to get container status \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": rpc error: code = NotFound desc = could not find container \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": container with ID starting with 6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.691761 5010 scope.go:117] "RemoveContainer" containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.692138 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} err="failed to get container status 
\"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": rpc error: code = NotFound desc = could not find container \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": container with ID starting with 1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.692177 5010 scope.go:117] "RemoveContainer" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.692637 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} err="failed to get container status \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": rpc error: code = NotFound desc = could not find container \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": container with ID starting with 37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.692662 5010 scope.go:117] "RemoveContainer" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.692985 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} err="failed to get container status \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": rpc error: code = NotFound desc = could not find container \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": container with ID starting with 0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.693065 5010 scope.go:117] "RemoveContainer" containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.693529 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} err="failed to get container status \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": rpc error: code = NotFound desc = could not find container \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": container with ID starting with ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.693556 5010 scope.go:117] "RemoveContainer" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.693857 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} err="failed to get container status \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": rpc error: code = NotFound desc = could not find container \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": container with ID starting with 4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.693884 5010 scope.go:117] "RemoveContainer" 
containerID="612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.694295 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278"} err="failed to get container status \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": rpc error: code = NotFound desc = could not find container \"612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278\": container with ID starting with 612d30527535f2ab452b950d5d16794d60f01b4e79734bbb9a051dac10a84278 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.694320 5010 scope.go:117] "RemoveContainer" containerID="ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.694568 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab"} err="failed to get container status \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": rpc error: code = NotFound desc = could not find container \"ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab\": container with ID starting with ae1d168fb67032303362f7311532812db90f2e3b207c425a25c149dbf38a5aab not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.694608 5010 scope.go:117] "RemoveContainer" containerID="928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.696593 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618"} err="failed to get container status \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": rpc error: code = NotFound desc = could not find container \"928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618\": container with ID starting with 928efed8e8b837c919b2f0086efb3d8c28cad1453e802a3dc53dab930c51c618 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.696630 5010 scope.go:117] "RemoveContainer" containerID="d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.696995 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da"} err="failed to get container status \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": rpc error: code = NotFound desc = could not find container \"d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da\": container with ID starting with d899fd6bd123c967c4b8d27e8478882ec2c9aa989da065fc13b1ffed0c0783da not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.697036 5010 scope.go:117] "RemoveContainer" containerID="6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.697850 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a"} err="failed to get container status \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": rpc error: code = NotFound desc = could not find 
container \"6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a\": container with ID starting with 6e23204a4300ed5be0528873df96c05331ac3781db3b21c952c2fec0505a529a not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.697920 5010 scope.go:117] "RemoveContainer" containerID="1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.698450 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466"} err="failed to get container status \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": rpc error: code = NotFound desc = could not find container \"1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466\": container with ID starting with 1b542b450530ceeff03eb16cfdb347d6647a362ab253695c9905ea2ae3755466 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.698482 5010 scope.go:117] "RemoveContainer" containerID="37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.698831 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3"} err="failed to get container status \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": rpc error: code = NotFound desc = could not find container \"37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3\": container with ID starting with 37bad1bd577bd217830bb704240eb5327fc9027bebb8b22894cc9a1e149feae3 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.698868 5010 scope.go:117] "RemoveContainer" containerID="0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.699153 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604"} err="failed to get container status \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": rpc error: code = NotFound desc = could not find container \"0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604\": container with ID starting with 0d0a9a08551399c2cbf716669eca9d4fd80158a567f386352fd0d418ad2e8604 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.699173 5010 scope.go:117] "RemoveContainer" containerID="ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.699392 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842"} err="failed to get container status \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": rpc error: code = NotFound desc = could not find container \"ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842\": container with ID starting with ddd5f363357c9fae4365df8bee6a5a8e19aa64ef9d2f4d6e59f97f62d5fd5842 not found: ID does not exist" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.699413 5010 scope.go:117] "RemoveContainer" containerID="4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1" Nov 26 15:37:44 crc kubenswrapper[5010]: I1126 15:37:44.700070 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1"} err="failed to get container status \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": rpc error: code = NotFound desc = could not find container \"4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1\": container with ID starting with 4a42314e7b5bb0407c48f45c3627c698bb82dc993c58f1827a80e19ba424d0f1 not found: ID does not exist" Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.401261 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"acb6773ee15d9db9fa795038dda7c10a517652b9757f4ad8a6147795ab9290da"} Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.402685 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"456b28183e5aa45ad7e2eb9c2d40022aafefc2789be143de94a806ff1df1e7e3"} Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.402776 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"5c67c93e39f5c65b51412b17615ae771f925edfa10923dbc511d8d7c46b21fee"} Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.402936 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"5d02e18f737bb7807219a71a987dc5d86ab316c7884768447559456c5d42d679"} Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.403045 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"4432e028642644fcff6d7e2e79c736219c748393c3cafba4d481a54f6facb549"} Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.403102 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"5e9338e046e78dfd0eb49adfc44baaa5fc4ce3b6be20d67364c49444ed35f326"} Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.403358 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/2.log" Nov 26 15:37:45 crc kubenswrapper[5010]: I1126 15:37:45.902471 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f10d9600-fac2-43e9-ad75-91b3c1f5b749" path="/var/lib/kubelet/pods/f10d9600-fac2-43e9-ad75-91b3c1f5b749/volumes" Nov 26 15:37:48 crc kubenswrapper[5010]: I1126 15:37:48.438266 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"e5ea7c95ce8ff9e602bb65faccf511bfaca56eb6077dddbedfe63fc17f72a330"} Nov 26 15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.454318 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" event={"ID":"53b64570-43a9-4d16-a607-31a7b54ad8b3","Type":"ContainerStarted","Data":"ce2ab2617363c9080e11d9a68edd6f714e2640e5e49e738b6f911ac4559714b8"} Nov 26 
15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.454987 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.455000 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.455010 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.482804 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.489839 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" podStartSLOduration=7.489818792 podStartE2EDuration="7.489818792s" podCreationTimestamp="2025-11-26 15:37:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:37:50.485483614 +0000 UTC m=+691.276200772" watchObservedRunningTime="2025-11-26 15:37:50.489818792 +0000 UTC m=+691.280535940" Nov 26 15:37:50 crc kubenswrapper[5010]: I1126 15:37:50.493994 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.350971 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-fxchh"] Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.352633 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.356651 5010 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-pb5m9" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.358184 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.358582 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.358830 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.379559 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-fxchh"] Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.478880 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnrvl\" (UniqueName: \"kubernetes.io/projected/1757a1c9-a540-48c7-9943-19e8bc559556-kube-api-access-hnrvl\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.478934 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1757a1c9-a540-48c7-9943-19e8bc559556-node-mnt\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.479021 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1757a1c9-a540-48c7-9943-19e8bc559556-crc-storage\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.580438 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnrvl\" (UniqueName: \"kubernetes.io/projected/1757a1c9-a540-48c7-9943-19e8bc559556-kube-api-access-hnrvl\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.580483 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1757a1c9-a540-48c7-9943-19e8bc559556-node-mnt\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.580526 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1757a1c9-a540-48c7-9943-19e8bc559556-crc-storage\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.581010 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1757a1c9-a540-48c7-9943-19e8bc559556-node-mnt\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.581186 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1757a1c9-a540-48c7-9943-19e8bc559556-crc-storage\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.608120 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnrvl\" (UniqueName: \"kubernetes.io/projected/1757a1c9-a540-48c7-9943-19e8bc559556-kube-api-access-hnrvl\") pod \"crc-storage-crc-fxchh\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: I1126 15:37:52.677143 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: E1126 15:37:52.713467 5010 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(0450dc42e00146ee6d47fa17313d29385953d28fe16bc955436f14194018d173): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:37:52 crc kubenswrapper[5010]: E1126 15:37:52.713614 5010 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(0450dc42e00146ee6d47fa17313d29385953d28fe16bc955436f14194018d173): no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: E1126 15:37:52.713693 5010 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(0450dc42e00146ee6d47fa17313d29385953d28fe16bc955436f14194018d173): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:52 crc kubenswrapper[5010]: E1126 15:37:52.713812 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-fxchh_crc-storage(1757a1c9-a540-48c7-9943-19e8bc559556)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-fxchh_crc-storage(1757a1c9-a540-48c7-9943-19e8bc559556)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(0450dc42e00146ee6d47fa17313d29385953d28fe16bc955436f14194018d173): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-fxchh" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" Nov 26 15:37:53 crc kubenswrapper[5010]: I1126 15:37:53.479824 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:53 crc kubenswrapper[5010]: I1126 15:37:53.480997 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:53 crc kubenswrapper[5010]: E1126 15:37:53.522177 5010 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(bf147abcef09f8127edf1a25d8424ee7efec7157c9575ee41f6c571811088d4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:37:53 crc kubenswrapper[5010]: E1126 15:37:53.522286 5010 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(bf147abcef09f8127edf1a25d8424ee7efec7157c9575ee41f6c571811088d4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:53 crc kubenswrapper[5010]: E1126 15:37:53.522328 5010 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(bf147abcef09f8127edf1a25d8424ee7efec7157c9575ee41f6c571811088d4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:37:53 crc kubenswrapper[5010]: E1126 15:37:53.522410 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-fxchh_crc-storage(1757a1c9-a540-48c7-9943-19e8bc559556)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-fxchh_crc-storage(1757a1c9-a540-48c7-9943-19e8bc559556)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(bf147abcef09f8127edf1a25d8424ee7efec7157c9575ee41f6c571811088d4b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-fxchh" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" Nov 26 15:37:56 crc kubenswrapper[5010]: I1126 15:37:56.892829 5010 scope.go:117] "RemoveContainer" containerID="5358d44abff63ec38fcdcf83ef302371855bdc2ea7e63d36b38665e5a8434fdb" Nov 26 15:37:56 crc kubenswrapper[5010]: E1126 15:37:56.893179 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-94lzp_openshift-multus(0a5a476f-6c13-4c62-8042-d9b37846aa18)\"" pod="openshift-multus/multus-94lzp" podUID="0a5a476f-6c13-4c62-8042-d9b37846aa18" Nov 26 15:38:05 crc kubenswrapper[5010]: I1126 15:38:05.891051 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:05 crc kubenswrapper[5010]: I1126 15:38:05.892398 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:05 crc kubenswrapper[5010]: E1126 15:38:05.935922 5010 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(cff8683f6338e4d392504fa6ee6a750c7a34e8802098d0df923092fa5054699b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 15:38:05 crc kubenswrapper[5010]: E1126 15:38:05.936029 5010 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(cff8683f6338e4d392504fa6ee6a750c7a34e8802098d0df923092fa5054699b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:05 crc kubenswrapper[5010]: E1126 15:38:05.936070 5010 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(cff8683f6338e4d392504fa6ee6a750c7a34e8802098d0df923092fa5054699b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:05 crc kubenswrapper[5010]: E1126 15:38:05.936172 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"crc-storage-crc-fxchh_crc-storage(1757a1c9-a540-48c7-9943-19e8bc559556)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"crc-storage-crc-fxchh_crc-storage(1757a1c9-a540-48c7-9943-19e8bc559556)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_crc-storage-crc-fxchh_crc-storage_1757a1c9-a540-48c7-9943-19e8bc559556_0(cff8683f6338e4d392504fa6ee6a750c7a34e8802098d0df923092fa5054699b): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="crc-storage/crc-storage-crc-fxchh" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" Nov 26 15:38:09 crc kubenswrapper[5010]: I1126 15:38:09.892633 5010 scope.go:117] "RemoveContainer" containerID="5358d44abff63ec38fcdcf83ef302371855bdc2ea7e63d36b38665e5a8434fdb" Nov 26 15:38:10 crc kubenswrapper[5010]: I1126 15:38:10.606063 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-94lzp_0a5a476f-6c13-4c62-8042-d9b37846aa18/kube-multus/2.log" Nov 26 15:38:10 crc kubenswrapper[5010]: I1126 15:38:10.606629 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-94lzp" event={"ID":"0a5a476f-6c13-4c62-8042-d9b37846aa18","Type":"ContainerStarted","Data":"71d73d0eb0142d782c717c3ab1b62a62b1f1a9eb6e330b355f9da190bddd2fe3"} Nov 26 15:38:11 crc kubenswrapper[5010]: I1126 15:38:11.425094 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:38:11 crc kubenswrapper[5010]: I1126 15:38:11.425204 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:38:14 crc kubenswrapper[5010]: I1126 15:38:14.154542 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hnjhm" Nov 26 15:38:17 crc kubenswrapper[5010]: I1126 15:38:17.891132 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:17 crc kubenswrapper[5010]: I1126 15:38:17.892462 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:18 crc kubenswrapper[5010]: I1126 15:38:18.401266 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-fxchh"] Nov 26 15:38:18 crc kubenswrapper[5010]: I1126 15:38:18.414840 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 15:38:18 crc kubenswrapper[5010]: I1126 15:38:18.674760 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-fxchh" event={"ID":"1757a1c9-a540-48c7-9943-19e8bc559556","Type":"ContainerStarted","Data":"d77010ef2c9b22122ad7216e434057a1ee1802f4ce305b724c04fc103b16d0f5"} Nov 26 15:38:21 crc kubenswrapper[5010]: I1126 15:38:21.693494 5010 generic.go:334] "Generic (PLEG): container finished" podID="1757a1c9-a540-48c7-9943-19e8bc559556" containerID="0e2d25cd12a82c87aa75f3fc80455038c19edb34d3d9698056e6b3d77b2df691" exitCode=0 Nov 26 15:38:21 crc kubenswrapper[5010]: I1126 15:38:21.693611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-fxchh" event={"ID":"1757a1c9-a540-48c7-9943-19e8bc559556","Type":"ContainerDied","Data":"0e2d25cd12a82c87aa75f3fc80455038c19edb34d3d9698056e6b3d77b2df691"} Nov 26 15:38:22 crc kubenswrapper[5010]: I1126 15:38:22.988623 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.159242 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1757a1c9-a540-48c7-9943-19e8bc559556-crc-storage\") pod \"1757a1c9-a540-48c7-9943-19e8bc559556\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.160046 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1757a1c9-a540-48c7-9943-19e8bc559556-node-mnt\") pod \"1757a1c9-a540-48c7-9943-19e8bc559556\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.160168 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1757a1c9-a540-48c7-9943-19e8bc559556-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "1757a1c9-a540-48c7-9943-19e8bc559556" (UID: "1757a1c9-a540-48c7-9943-19e8bc559556"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.160324 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnrvl\" (UniqueName: \"kubernetes.io/projected/1757a1c9-a540-48c7-9943-19e8bc559556-kube-api-access-hnrvl\") pod \"1757a1c9-a540-48c7-9943-19e8bc559556\" (UID: \"1757a1c9-a540-48c7-9943-19e8bc559556\") " Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.160909 5010 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/1757a1c9-a540-48c7-9943-19e8bc559556-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.166944 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1757a1c9-a540-48c7-9943-19e8bc559556-kube-api-access-hnrvl" (OuterVolumeSpecName: "kube-api-access-hnrvl") pod "1757a1c9-a540-48c7-9943-19e8bc559556" (UID: "1757a1c9-a540-48c7-9943-19e8bc559556"). 
InnerVolumeSpecName "kube-api-access-hnrvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.172164 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1757a1c9-a540-48c7-9943-19e8bc559556-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "1757a1c9-a540-48c7-9943-19e8bc559556" (UID: "1757a1c9-a540-48c7-9943-19e8bc559556"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.262123 5010 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/1757a1c9-a540-48c7-9943-19e8bc559556-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.262178 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnrvl\" (UniqueName: \"kubernetes.io/projected/1757a1c9-a540-48c7-9943-19e8bc559556-kube-api-access-hnrvl\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.714494 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-fxchh" event={"ID":"1757a1c9-a540-48c7-9943-19e8bc559556","Type":"ContainerDied","Data":"d77010ef2c9b22122ad7216e434057a1ee1802f4ce305b724c04fc103b16d0f5"} Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.714889 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d77010ef2c9b22122ad7216e434057a1ee1802f4ce305b724c04fc103b16d0f5" Nov 26 15:38:23 crc kubenswrapper[5010]: I1126 15:38:23.714665 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-fxchh" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.887399 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp"] Nov 26 15:38:31 crc kubenswrapper[5010]: E1126 15:38:31.889199 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" containerName="storage" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.889232 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" containerName="storage" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.889413 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" containerName="storage" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.890185 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.898517 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp"] Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.902352 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.995170 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.995244 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:31 crc kubenswrapper[5010]: I1126 15:38:31.995502 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g577j\" (UniqueName: \"kubernetes.io/projected/6afdbf0c-4651-414d-9aca-9a74ec043b34-kube-api-access-g577j\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.097624 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.097762 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.097837 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g577j\" (UniqueName: \"kubernetes.io/projected/6afdbf0c-4651-414d-9aca-9a74ec043b34-kube-api-access-g577j\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.098636 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.098821 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.131746 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g577j\" (UniqueName: \"kubernetes.io/projected/6afdbf0c-4651-414d-9aca-9a74ec043b34-kube-api-access-g577j\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.220437 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.473844 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp"] Nov 26 15:38:32 crc kubenswrapper[5010]: W1126 15:38:32.484849 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6afdbf0c_4651_414d_9aca_9a74ec043b34.slice/crio-219d54680e2572a533342009101e21c259a9b35f713a7cfe6271d1958c1e46ab WatchSource:0}: Error finding container 219d54680e2572a533342009101e21c259a9b35f713a7cfe6271d1958c1e46ab: Status 404 returned error can't find the container with id 219d54680e2572a533342009101e21c259a9b35f713a7cfe6271d1958c1e46ab Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.769958 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" event={"ID":"6afdbf0c-4651-414d-9aca-9a74ec043b34","Type":"ContainerStarted","Data":"83b230eb173b649e3d51b6f2d4f21809ba4b176339e483d6e072cc48149ee5cd"} Nov 26 15:38:32 crc kubenswrapper[5010]: I1126 15:38:32.770009 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" event={"ID":"6afdbf0c-4651-414d-9aca-9a74ec043b34","Type":"ContainerStarted","Data":"219d54680e2572a533342009101e21c259a9b35f713a7cfe6271d1958c1e46ab"} Nov 26 15:38:33 crc kubenswrapper[5010]: I1126 15:38:33.782539 5010 generic.go:334] "Generic (PLEG): container finished" podID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerID="83b230eb173b649e3d51b6f2d4f21809ba4b176339e483d6e072cc48149ee5cd" exitCode=0 Nov 26 15:38:33 crc kubenswrapper[5010]: I1126 15:38:33.782607 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" event={"ID":"6afdbf0c-4651-414d-9aca-9a74ec043b34","Type":"ContainerDied","Data":"83b230eb173b649e3d51b6f2d4f21809ba4b176339e483d6e072cc48149ee5cd"} Nov 26 15:38:35 crc 
kubenswrapper[5010]: I1126 15:38:35.802259 5010 generic.go:334] "Generic (PLEG): container finished" podID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerID="467dd932480a4d3c8fcbff42451a32c76eca6f2c6b6b69bdcf2aabe5b0e5a989" exitCode=0 Nov 26 15:38:35 crc kubenswrapper[5010]: I1126 15:38:35.802345 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" event={"ID":"6afdbf0c-4651-414d-9aca-9a74ec043b34","Type":"ContainerDied","Data":"467dd932480a4d3c8fcbff42451a32c76eca6f2c6b6b69bdcf2aabe5b0e5a989"} Nov 26 15:38:36 crc kubenswrapper[5010]: I1126 15:38:36.815652 5010 generic.go:334] "Generic (PLEG): container finished" podID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerID="5814f00e3f1d39d2c3a2dfe4f47498b9c2875bf8f1dde03f1ae93cd2515c05a0" exitCode=0 Nov 26 15:38:36 crc kubenswrapper[5010]: I1126 15:38:36.815742 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" event={"ID":"6afdbf0c-4651-414d-9aca-9a74ec043b34","Type":"ContainerDied","Data":"5814f00e3f1d39d2c3a2dfe4f47498b9c2875bf8f1dde03f1ae93cd2515c05a0"} Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.143025 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.304379 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g577j\" (UniqueName: \"kubernetes.io/projected/6afdbf0c-4651-414d-9aca-9a74ec043b34-kube-api-access-g577j\") pod \"6afdbf0c-4651-414d-9aca-9a74ec043b34\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.304438 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-bundle\") pod \"6afdbf0c-4651-414d-9aca-9a74ec043b34\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.304486 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-util\") pod \"6afdbf0c-4651-414d-9aca-9a74ec043b34\" (UID: \"6afdbf0c-4651-414d-9aca-9a74ec043b34\") " Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.305883 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-bundle" (OuterVolumeSpecName: "bundle") pod "6afdbf0c-4651-414d-9aca-9a74ec043b34" (UID: "6afdbf0c-4651-414d-9aca-9a74ec043b34"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.313489 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6afdbf0c-4651-414d-9aca-9a74ec043b34-kube-api-access-g577j" (OuterVolumeSpecName: "kube-api-access-g577j") pod "6afdbf0c-4651-414d-9aca-9a74ec043b34" (UID: "6afdbf0c-4651-414d-9aca-9a74ec043b34"). InnerVolumeSpecName "kube-api-access-g577j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.316482 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-util" (OuterVolumeSpecName: "util") pod "6afdbf0c-4651-414d-9aca-9a74ec043b34" (UID: "6afdbf0c-4651-414d-9aca-9a74ec043b34"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.406522 5010 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-util\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.406557 5010 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6afdbf0c-4651-414d-9aca-9a74ec043b34-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.406568 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g577j\" (UniqueName: \"kubernetes.io/projected/6afdbf0c-4651-414d-9aca-9a74ec043b34-kube-api-access-g577j\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.832172 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" event={"ID":"6afdbf0c-4651-414d-9aca-9a74ec043b34","Type":"ContainerDied","Data":"219d54680e2572a533342009101e21c259a9b35f713a7cfe6271d1958c1e46ab"} Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.832220 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="219d54680e2572a533342009101e21c259a9b35f713a7cfe6271d1958c1e46ab" Nov 26 15:38:38 crc kubenswrapper[5010]: I1126 15:38:38.832273 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp" Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.422888 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.423170 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.423222 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.423889 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"07de4390fc3c8495bcdc1f46830e2b986f1ed25110c72eb2d6d31304d8ef46ee"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.423948 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://07de4390fc3c8495bcdc1f46830e2b986f1ed25110c72eb2d6d31304d8ef46ee" gracePeriod=600 Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.854680 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="07de4390fc3c8495bcdc1f46830e2b986f1ed25110c72eb2d6d31304d8ef46ee" exitCode=0 Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.854813 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"07de4390fc3c8495bcdc1f46830e2b986f1ed25110c72eb2d6d31304d8ef46ee"} Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.854964 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"866a4d79b3a741e66d3af7f04184bb9e206692b2113aca2fc0a5c00bbc84fa10"} Nov 26 15:38:41 crc kubenswrapper[5010]: I1126 15:38:41.855002 5010 scope.go:117] "RemoveContainer" containerID="cb7781e50e97e8aaffc05be4d03baf866d7e4a9a0d796b777de62a3d4894cdb4" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.448763 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-2whk4"] Nov 26 15:38:43 crc kubenswrapper[5010]: E1126 15:38:43.449608 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerName="extract" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.449624 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" 
containerName="extract" Nov 26 15:38:43 crc kubenswrapper[5010]: E1126 15:38:43.449646 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerName="pull" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.449657 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerName="pull" Nov 26 15:38:43 crc kubenswrapper[5010]: E1126 15:38:43.449674 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerName="util" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.449684 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerName="util" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.449870 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6afdbf0c-4651-414d-9aca-9a74ec043b34" containerName="extract" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.450439 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.453356 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-9hqt5" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.453903 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.454167 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.459093 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-2whk4"] Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.576080 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbrm4\" (UniqueName: \"kubernetes.io/projected/f9552f8b-91b2-41aa-a1f4-2239ee49085d-kube-api-access-bbrm4\") pod \"nmstate-operator-557fdffb88-2whk4\" (UID: \"f9552f8b-91b2-41aa-a1f4-2239ee49085d\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.677915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbrm4\" (UniqueName: \"kubernetes.io/projected/f9552f8b-91b2-41aa-a1f4-2239ee49085d-kube-api-access-bbrm4\") pod \"nmstate-operator-557fdffb88-2whk4\" (UID: \"f9552f8b-91b2-41aa-a1f4-2239ee49085d\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.697897 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbrm4\" (UniqueName: \"kubernetes.io/projected/f9552f8b-91b2-41aa-a1f4-2239ee49085d-kube-api-access-bbrm4\") pod \"nmstate-operator-557fdffb88-2whk4\" (UID: \"f9552f8b-91b2-41aa-a1f4-2239ee49085d\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.768377 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" Nov 26 15:38:43 crc kubenswrapper[5010]: I1126 15:38:43.980586 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-2whk4"] Nov 26 15:38:43 crc kubenswrapper[5010]: W1126 15:38:43.985639 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf9552f8b_91b2_41aa_a1f4_2239ee49085d.slice/crio-51c9925689a9b065379fe7697045f04a3eadc0017c99f150bcf2cfaf30cc3a99 WatchSource:0}: Error finding container 51c9925689a9b065379fe7697045f04a3eadc0017c99f150bcf2cfaf30cc3a99: Status 404 returned error can't find the container with id 51c9925689a9b065379fe7697045f04a3eadc0017c99f150bcf2cfaf30cc3a99 Nov 26 15:38:44 crc kubenswrapper[5010]: I1126 15:38:44.879344 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" event={"ID":"f9552f8b-91b2-41aa-a1f4-2239ee49085d","Type":"ContainerStarted","Data":"51c9925689a9b065379fe7697045f04a3eadc0017c99f150bcf2cfaf30cc3a99"} Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.399451 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-6pfg9"] Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.400039 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" podUID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" containerName="controller-manager" containerID="cri-o://c06034f69c2eaa634cd3534c7d7e3290a5173bc151be854a810b7cff67d2be0b" gracePeriod=30 Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.498259 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt"] Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.498508 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" podUID="a33ae656-009d-4adb-80ef-143cb00bba21" containerName="route-controller-manager" containerID="cri-o://d7a2c84dd94c042320b47f83e8811322fb609bdff0b5e30570bf99dad5a0cd34" gracePeriod=30 Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.900587 5010 generic.go:334] "Generic (PLEG): container finished" podID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" containerID="c06034f69c2eaa634cd3534c7d7e3290a5173bc151be854a810b7cff67d2be0b" exitCode=0 Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.900759 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" event={"ID":"6ef3bb6f-39b5-4919-90a4-897d4841c9f1","Type":"ContainerDied","Data":"c06034f69c2eaa634cd3534c7d7e3290a5173bc151be854a810b7cff67d2be0b"} Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.902939 5010 generic.go:334] "Generic (PLEG): container finished" podID="a33ae656-009d-4adb-80ef-143cb00bba21" containerID="d7a2c84dd94c042320b47f83e8811322fb609bdff0b5e30570bf99dad5a0cd34" exitCode=0 Nov 26 15:38:46 crc kubenswrapper[5010]: I1126 15:38:46.902999 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" event={"ID":"a33ae656-009d-4adb-80ef-143cb00bba21","Type":"ContainerDied","Data":"d7a2c84dd94c042320b47f83e8811322fb609bdff0b5e30570bf99dad5a0cd34"} Nov 26 15:38:47 crc 
kubenswrapper[5010]: I1126 15:38:47.092848 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.142587 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9bpr\" (UniqueName: \"kubernetes.io/projected/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-kube-api-access-h9bpr\") pod \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.143075 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-proxy-ca-bundles\") pod \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.149432 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-config\") pod \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.149489 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-serving-cert\") pod \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.149572 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-client-ca\") pod \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\" (UID: \"6ef3bb6f-39b5-4919-90a4-897d4841c9f1\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.150851 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "6ef3bb6f-39b5-4919-90a4-897d4841c9f1" (UID: "6ef3bb6f-39b5-4919-90a4-897d4841c9f1"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.150860 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-client-ca" (OuterVolumeSpecName: "client-ca") pod "6ef3bb6f-39b5-4919-90a4-897d4841c9f1" (UID: "6ef3bb6f-39b5-4919-90a4-897d4841c9f1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.151509 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-config" (OuterVolumeSpecName: "config") pod "6ef3bb6f-39b5-4919-90a4-897d4841c9f1" (UID: "6ef3bb6f-39b5-4919-90a4-897d4841c9f1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.159895 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.167114 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-kube-api-access-h9bpr" (OuterVolumeSpecName: "kube-api-access-h9bpr") pod "6ef3bb6f-39b5-4919-90a4-897d4841c9f1" (UID: "6ef3bb6f-39b5-4919-90a4-897d4841c9f1"). InnerVolumeSpecName "kube-api-access-h9bpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.168750 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6ef3bb6f-39b5-4919-90a4-897d4841c9f1" (UID: "6ef3bb6f-39b5-4919-90a4-897d4841c9f1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.251268 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwmm4\" (UniqueName: \"kubernetes.io/projected/a33ae656-009d-4adb-80ef-143cb00bba21-kube-api-access-lwmm4\") pod \"a33ae656-009d-4adb-80ef-143cb00bba21\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252131 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-client-ca\") pod \"a33ae656-009d-4adb-80ef-143cb00bba21\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252197 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a33ae656-009d-4adb-80ef-143cb00bba21-serving-cert\") pod \"a33ae656-009d-4adb-80ef-143cb00bba21\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252221 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-config\") pod \"a33ae656-009d-4adb-80ef-143cb00bba21\" (UID: \"a33ae656-009d-4adb-80ef-143cb00bba21\") " Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252493 5010 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252512 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9bpr\" (UniqueName: \"kubernetes.io/projected/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-kube-api-access-h9bpr\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252522 5010 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252531 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252539 5010 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6ef3bb6f-39b5-4919-90a4-897d4841c9f1-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.252840 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-client-ca" (OuterVolumeSpecName: "client-ca") pod "a33ae656-009d-4adb-80ef-143cb00bba21" (UID: "a33ae656-009d-4adb-80ef-143cb00bba21"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.253427 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-config" (OuterVolumeSpecName: "config") pod "a33ae656-009d-4adb-80ef-143cb00bba21" (UID: "a33ae656-009d-4adb-80ef-143cb00bba21"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.254792 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a33ae656-009d-4adb-80ef-143cb00bba21-kube-api-access-lwmm4" (OuterVolumeSpecName: "kube-api-access-lwmm4") pod "a33ae656-009d-4adb-80ef-143cb00bba21" (UID: "a33ae656-009d-4adb-80ef-143cb00bba21"). InnerVolumeSpecName "kube-api-access-lwmm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.259893 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a33ae656-009d-4adb-80ef-143cb00bba21-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a33ae656-009d-4adb-80ef-143cb00bba21" (UID: "a33ae656-009d-4adb-80ef-143cb00bba21"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.354048 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwmm4\" (UniqueName: \"kubernetes.io/projected/a33ae656-009d-4adb-80ef-143cb00bba21-kube-api-access-lwmm4\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.354655 5010 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.354731 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a33ae656-009d-4adb-80ef-143cb00bba21-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.354784 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a33ae656-009d-4adb-80ef-143cb00bba21-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.647765 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5"] Nov 26 15:38:47 crc kubenswrapper[5010]: E1126 15:38:47.648270 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" containerName="controller-manager" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.648284 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" containerName="controller-manager" Nov 26 15:38:47 crc kubenswrapper[5010]: E1126 15:38:47.648301 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a33ae656-009d-4adb-80ef-143cb00bba21" containerName="route-controller-manager" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.648310 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a33ae656-009d-4adb-80ef-143cb00bba21" containerName="route-controller-manager" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.648432 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a33ae656-009d-4adb-80ef-143cb00bba21" containerName="route-controller-manager" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.648447 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" containerName="controller-manager" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.648909 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.650322 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc"] Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.650903 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.654898 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc"] Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.658068 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-client-ca\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.658114 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-proxy-ca-bundles\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.658164 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edc69b5c-4248-4f86-8602-7c31258ac2c3-serving-cert\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.658187 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqcz7\" (UniqueName: \"kubernetes.io/projected/edc69b5c-4248-4f86-8602-7c31258ac2c3-kube-api-access-gqcz7\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.658207 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-config\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.659672 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5"] Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.759102 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-client-ca\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.759612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-client-ca\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 
crc kubenswrapper[5010]: I1126 15:38:47.759699 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8lzm\" (UniqueName: \"kubernetes.io/projected/19360de4-bc38-43e1-8b9a-8422b0213697-kube-api-access-q8lzm\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760004 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-proxy-ca-bundles\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760217 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19360de4-bc38-43e1-8b9a-8422b0213697-serving-cert\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760410 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edc69b5c-4248-4f86-8602-7c31258ac2c3-serving-cert\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760505 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqcz7\" (UniqueName: \"kubernetes.io/projected/edc69b5c-4248-4f86-8602-7c31258ac2c3-kube-api-access-gqcz7\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760583 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-config\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760658 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-config\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.760591 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-client-ca\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.761547 5010 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-config\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.761668 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-proxy-ca-bundles\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.768590 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edc69b5c-4248-4f86-8602-7c31258ac2c3-serving-cert\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.775862 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqcz7\" (UniqueName: \"kubernetes.io/projected/edc69b5c-4248-4f86-8602-7c31258ac2c3-kube-api-access-gqcz7\") pod \"controller-manager-dc5b66b4d-4x6v5\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.862451 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19360de4-bc38-43e1-8b9a-8422b0213697-serving-cert\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.862524 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-config\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.862563 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-client-ca\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.862596 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8lzm\" (UniqueName: \"kubernetes.io/projected/19360de4-bc38-43e1-8b9a-8422b0213697-kube-api-access-q8lzm\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.863799 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-client-ca\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: 
\"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.863882 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-config\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.868759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19360de4-bc38-43e1-8b9a-8422b0213697-serving-cert\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.883553 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8lzm\" (UniqueName: \"kubernetes.io/projected/19360de4-bc38-43e1-8b9a-8422b0213697-kube-api-access-q8lzm\") pod \"route-controller-manager-5fcdffd88f-m2ljc\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.917814 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.917893 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-6pfg9" event={"ID":"6ef3bb6f-39b5-4919-90a4-897d4841c9f1","Type":"ContainerDied","Data":"3942d6ad7fb137be45830c50e48b0ae9193e5c43df15ae4ef7b0eef04bbec242"} Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.918355 5010 scope.go:117] "RemoveContainer" containerID="c06034f69c2eaa634cd3534c7d7e3290a5173bc151be854a810b7cff67d2be0b" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.920002 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" event={"ID":"a33ae656-009d-4adb-80ef-143cb00bba21","Type":"ContainerDied","Data":"97f5628523486d97e9f108aead5e237da573a3396a869d062e236dcd9d703967"} Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.920038 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.921183 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" event={"ID":"f9552f8b-91b2-41aa-a1f4-2239ee49085d","Type":"ContainerStarted","Data":"c1647ad14afbe8d36708dfb520a92a64ae65ef61964fc7841e999d5cc28c9ade"} Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.939314 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5"] Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.941058 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.944953 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc"] Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.945444 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.954829 5010 scope.go:117] "RemoveContainer" containerID="d7a2c84dd94c042320b47f83e8811322fb609bdff0b5e30570bf99dad5a0cd34" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.960077 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-2whk4" podStartSLOduration=1.864100815 podStartE2EDuration="4.960058552s" podCreationTimestamp="2025-11-26 15:38:43 +0000 UTC" firstStartedPulling="2025-11-26 15:38:43.987907731 +0000 UTC m=+744.778624869" lastFinishedPulling="2025-11-26 15:38:47.083865458 +0000 UTC m=+747.874582606" observedRunningTime="2025-11-26 15:38:47.956840912 +0000 UTC m=+748.747558070" watchObservedRunningTime="2025-11-26 15:38:47.960058552 +0000 UTC m=+748.750775700" Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.988601 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-6pfg9"] Nov 26 15:38:47 crc kubenswrapper[5010]: I1126 15:38:47.991432 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-6pfg9"] Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.007694 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt"] Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.013700 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-tmngt"] Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.245996 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5"] Nov 26 15:38:48 crc kubenswrapper[5010]: W1126 15:38:48.258435 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podedc69b5c_4248_4f86_8602_7c31258ac2c3.slice/crio-a8e5b3573a37f8041dbc38cae44fdd1e7fac7accf6418a9e6aede2d3d059c0fe WatchSource:0}: Error finding container a8e5b3573a37f8041dbc38cae44fdd1e7fac7accf6418a9e6aede2d3d059c0fe: Status 404 returned error can't find the container with id a8e5b3573a37f8041dbc38cae44fdd1e7fac7accf6418a9e6aede2d3d059c0fe Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.276267 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc"] Nov 26 15:38:48 crc kubenswrapper[5010]: W1126 15:38:48.293896 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19360de4_bc38_43e1_8b9a_8422b0213697.slice/crio-728867e31e03aad92d4498ff6f8de7bfd6242048705270789e6c2ba91a23bf27 WatchSource:0}: Error finding container 728867e31e03aad92d4498ff6f8de7bfd6242048705270789e6c2ba91a23bf27: Status 404 returned error can't find the container with id 
728867e31e03aad92d4498ff6f8de7bfd6242048705270789e6c2ba91a23bf27 Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.928551 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" event={"ID":"edc69b5c-4248-4f86-8602-7c31258ac2c3","Type":"ContainerStarted","Data":"d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43"} Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.928602 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" event={"ID":"edc69b5c-4248-4f86-8602-7c31258ac2c3","Type":"ContainerStarted","Data":"a8e5b3573a37f8041dbc38cae44fdd1e7fac7accf6418a9e6aede2d3d059c0fe"} Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.928778 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" podUID="edc69b5c-4248-4f86-8602-7c31258ac2c3" containerName="controller-manager" containerID="cri-o://d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43" gracePeriod=30 Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.929058 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.931870 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" event={"ID":"19360de4-bc38-43e1-8b9a-8422b0213697","Type":"ContainerStarted","Data":"62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736"} Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.931907 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" event={"ID":"19360de4-bc38-43e1-8b9a-8422b0213697","Type":"ContainerStarted","Data":"728867e31e03aad92d4498ff6f8de7bfd6242048705270789e6c2ba91a23bf27"} Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.932029 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" podUID="19360de4-bc38-43e1-8b9a-8422b0213697" containerName="route-controller-manager" containerID="cri-o://62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736" gracePeriod=30 Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.932111 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.938101 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.948352 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" podStartSLOduration=2.9483361070000003 podStartE2EDuration="2.948336107s" podCreationTimestamp="2025-11-26 15:38:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:38:48.947115407 +0000 UTC m=+749.737832575" watchObservedRunningTime="2025-11-26 15:38:48.948336107 +0000 UTC m=+749.739053255" Nov 26 15:38:48 crc kubenswrapper[5010]: I1126 15:38:48.974733 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" podStartSLOduration=2.974718652 podStartE2EDuration="2.974718652s" podCreationTimestamp="2025-11-26 15:38:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:38:48.974531637 +0000 UTC m=+749.765248785" watchObservedRunningTime="2025-11-26 15:38:48.974718652 +0000 UTC m=+749.765435810" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.171683 5010 patch_prober.go:28] interesting pod/route-controller-manager-5fcdffd88f-m2ljc container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.44:8443/healthz\": read tcp 10.217.0.2:58990->10.217.0.44:8443: read: connection reset by peer" start-of-body= Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.171787 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" podUID="19360de4-bc38-43e1-8b9a-8422b0213697" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.44:8443/healthz\": read tcp 10.217.0.2:58990->10.217.0.44:8443: read: connection reset by peer" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.475884 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.508551 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-579bb66b4c-ghjg5"] Nov 26 15:38:49 crc kubenswrapper[5010]: E1126 15:38:49.508820 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edc69b5c-4248-4f86-8602-7c31258ac2c3" containerName="controller-manager" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.508832 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="edc69b5c-4248-4f86-8602-7c31258ac2c3" containerName="controller-manager" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.508930 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="edc69b5c-4248-4f86-8602-7c31258ac2c3" containerName="controller-manager" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.509394 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.521967 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-579bb66b4c-ghjg5"] Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.532838 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-5fcdffd88f-m2ljc_19360de4-bc38-43e1-8b9a-8422b0213697/route-controller-manager/0.log" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.532941 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.585992 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19360de4-bc38-43e1-8b9a-8422b0213697-serving-cert\") pod \"19360de4-bc38-43e1-8b9a-8422b0213697\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586047 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-config\") pod \"edc69b5c-4248-4f86-8602-7c31258ac2c3\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586471 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edc69b5c-4248-4f86-8602-7c31258ac2c3-serving-cert\") pod \"edc69b5c-4248-4f86-8602-7c31258ac2c3\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586511 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-client-ca\") pod \"edc69b5c-4248-4f86-8602-7c31258ac2c3\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586579 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-config\") pod \"19360de4-bc38-43e1-8b9a-8422b0213697\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586623 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-proxy-ca-bundles\") pod \"edc69b5c-4248-4f86-8602-7c31258ac2c3\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q8lzm\" (UniqueName: \"kubernetes.io/projected/19360de4-bc38-43e1-8b9a-8422b0213697-kube-api-access-q8lzm\") pod \"19360de4-bc38-43e1-8b9a-8422b0213697\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.586682 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqcz7\" (UniqueName: \"kubernetes.io/projected/edc69b5c-4248-4f86-8602-7c31258ac2c3-kube-api-access-gqcz7\") pod \"edc69b5c-4248-4f86-8602-7c31258ac2c3\" (UID: \"edc69b5c-4248-4f86-8602-7c31258ac2c3\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.587664 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "edc69b5c-4248-4f86-8602-7c31258ac2c3" (UID: "edc69b5c-4248-4f86-8602-7c31258ac2c3"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.587690 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-config" (OuterVolumeSpecName: "config") pod "19360de4-bc38-43e1-8b9a-8422b0213697" (UID: "19360de4-bc38-43e1-8b9a-8422b0213697"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.587938 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-client-ca\") pod \"19360de4-bc38-43e1-8b9a-8422b0213697\" (UID: \"19360de4-bc38-43e1-8b9a-8422b0213697\") " Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.588036 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-config" (OuterVolumeSpecName: "config") pod "edc69b5c-4248-4f86-8602-7c31258ac2c3" (UID: "edc69b5c-4248-4f86-8602-7c31258ac2c3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.588567 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-client-ca" (OuterVolumeSpecName: "client-ca") pod "19360de4-bc38-43e1-8b9a-8422b0213697" (UID: "19360de4-bc38-43e1-8b9a-8422b0213697"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.588663 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-client-ca\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.588862 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-config\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.588928 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tscz6\" (UniqueName: \"kubernetes.io/projected/f7f96bd4-6326-4105-b169-470b0ae2b9bc-kube-api-access-tscz6\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.589053 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f96bd4-6326-4105-b169-470b0ae2b9bc-serving-cert\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.589103 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-client-ca" (OuterVolumeSpecName: "client-ca") pod "edc69b5c-4248-4f86-8602-7c31258ac2c3" (UID: "edc69b5c-4248-4f86-8602-7c31258ac2c3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.589187 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-proxy-ca-bundles\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.590598 5010 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.590620 5010 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.590631 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.590642 5010 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/edc69b5c-4248-4f86-8602-7c31258ac2c3-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.590652 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/19360de4-bc38-43e1-8b9a-8422b0213697-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.593542 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19360de4-bc38-43e1-8b9a-8422b0213697-kube-api-access-q8lzm" (OuterVolumeSpecName: "kube-api-access-q8lzm") pod "19360de4-bc38-43e1-8b9a-8422b0213697" (UID: "19360de4-bc38-43e1-8b9a-8422b0213697"). InnerVolumeSpecName "kube-api-access-q8lzm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.596116 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/edc69b5c-4248-4f86-8602-7c31258ac2c3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "edc69b5c-4248-4f86-8602-7c31258ac2c3" (UID: "edc69b5c-4248-4f86-8602-7c31258ac2c3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.597800 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19360de4-bc38-43e1-8b9a-8422b0213697-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "19360de4-bc38-43e1-8b9a-8422b0213697" (UID: "19360de4-bc38-43e1-8b9a-8422b0213697"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.610577 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edc69b5c-4248-4f86-8602-7c31258ac2c3-kube-api-access-gqcz7" (OuterVolumeSpecName: "kube-api-access-gqcz7") pod "edc69b5c-4248-4f86-8602-7c31258ac2c3" (UID: "edc69b5c-4248-4f86-8602-7c31258ac2c3"). InnerVolumeSpecName "kube-api-access-gqcz7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692604 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-proxy-ca-bundles\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692736 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-client-ca\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692788 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-config\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692843 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tscz6\" (UniqueName: \"kubernetes.io/projected/f7f96bd4-6326-4105-b169-470b0ae2b9bc-kube-api-access-tscz6\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692877 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f96bd4-6326-4105-b169-470b0ae2b9bc-serving-cert\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692966 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q8lzm\" (UniqueName: \"kubernetes.io/projected/19360de4-bc38-43e1-8b9a-8422b0213697-kube-api-access-q8lzm\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692981 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqcz7\" (UniqueName: \"kubernetes.io/projected/edc69b5c-4248-4f86-8602-7c31258ac2c3-kube-api-access-gqcz7\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.692998 5010 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/19360de4-bc38-43e1-8b9a-8422b0213697-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.693012 5010 reconciler_common.go:293] "Volume detached for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/edc69b5c-4248-4f86-8602-7c31258ac2c3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.694618 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-proxy-ca-bundles\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.694860 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-client-ca\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.695846 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7f96bd4-6326-4105-b169-470b0ae2b9bc-config\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.700650 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f7f96bd4-6326-4105-b169-470b0ae2b9bc-serving-cert\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.712454 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tscz6\" (UniqueName: \"kubernetes.io/projected/f7f96bd4-6326-4105-b169-470b0ae2b9bc-kube-api-access-tscz6\") pod \"controller-manager-579bb66b4c-ghjg5\" (UID: \"f7f96bd4-6326-4105-b169-470b0ae2b9bc\") " pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.830206 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.908583 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ef3bb6f-39b5-4919-90a4-897d4841c9f1" path="/var/lib/kubelet/pods/6ef3bb6f-39b5-4919-90a4-897d4841c9f1/volumes" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.909309 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a33ae656-009d-4adb-80ef-143cb00bba21" path="/var/lib/kubelet/pods/a33ae656-009d-4adb-80ef-143cb00bba21/volumes" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.972006 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-route-controller-manager_route-controller-manager-5fcdffd88f-m2ljc_19360de4-bc38-43e1-8b9a-8422b0213697/route-controller-manager/0.log" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.972069 5010 generic.go:334] "Generic (PLEG): container finished" podID="19360de4-bc38-43e1-8b9a-8422b0213697" containerID="62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736" exitCode=255 Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.972287 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.973137 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" event={"ID":"19360de4-bc38-43e1-8b9a-8422b0213697","Type":"ContainerDied","Data":"62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736"} Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.973178 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc" event={"ID":"19360de4-bc38-43e1-8b9a-8422b0213697","Type":"ContainerDied","Data":"728867e31e03aad92d4498ff6f8de7bfd6242048705270789e6c2ba91a23bf27"} Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.973225 5010 scope.go:117] "RemoveContainer" containerID="62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.975879 5010 generic.go:334] "Generic (PLEG): container finished" podID="edc69b5c-4248-4f86-8602-7c31258ac2c3" containerID="d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43" exitCode=0 Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.975902 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" event={"ID":"edc69b5c-4248-4f86-8602-7c31258ac2c3","Type":"ContainerDied","Data":"d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43"} Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.975917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" event={"ID":"edc69b5c-4248-4f86-8602-7c31258ac2c3","Type":"ContainerDied","Data":"a8e5b3573a37f8041dbc38cae44fdd1e7fac7accf6418a9e6aede2d3d059c0fe"} Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.975955 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5" Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.995183 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc"] Nov 26 15:38:49 crc kubenswrapper[5010]: I1126 15:38:49.997880 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fcdffd88f-m2ljc"] Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.007451 5010 scope.go:117] "RemoveContainer" containerID="62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736" Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.012092 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5"] Nov 26 15:38:50 crc kubenswrapper[5010]: E1126 15:38:50.012163 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736\": container with ID starting with 62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736 not found: ID does not exist" containerID="62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736" Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.012214 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736"} err="failed to get container status \"62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736\": rpc error: code = NotFound desc = could not find container \"62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736\": container with ID starting with 62eaf8e369eb1945fd3b9179e6c4508d5995e5881ce29d8ac4e1b3819bdc1736 not found: ID does not exist" Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.012252 5010 scope.go:117] "RemoveContainer" containerID="d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43" Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.015062 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-dc5b66b4d-4x6v5"] Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.031875 5010 scope.go:117] "RemoveContainer" containerID="d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43" Nov 26 15:38:50 crc kubenswrapper[5010]: E1126 15:38:50.032361 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43\": container with ID starting with d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43 not found: ID does not exist" containerID="d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43" Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.032411 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43"} err="failed to get container status \"d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43\": rpc error: code = NotFound desc = could not find container \"d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43\": container with ID starting with d049935231309da905f1c2c6a11b8a6ed4d3001ef4fc84b5e92047dc26427c43 not found: ID does not exist" Nov 26 15:38:50 crc 
kubenswrapper[5010]: I1126 15:38:50.084526 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-579bb66b4c-ghjg5"] Nov 26 15:38:50 crc kubenswrapper[5010]: W1126 15:38:50.091891 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7f96bd4_6326_4105_b169_470b0ae2b9bc.slice/crio-f15de58ffd88997bda0936a4b13c96a349c6e950c625816df918129b240d8d23 WatchSource:0}: Error finding container f15de58ffd88997bda0936a4b13c96a349c6e950c625816df918129b240d8d23: Status 404 returned error can't find the container with id f15de58ffd88997bda0936a4b13c96a349c6e950c625816df918129b240d8d23 Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.986522 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" event={"ID":"f7f96bd4-6326-4105-b169-470b0ae2b9bc","Type":"ContainerStarted","Data":"4ba411aad63b4ea8855986f93b09b2c52f648b408f02de8f2256243ddac8430e"} Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.987311 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" event={"ID":"f7f96bd4-6326-4105-b169-470b0ae2b9bc","Type":"ContainerStarted","Data":"f15de58ffd88997bda0936a4b13c96a349c6e950c625816df918129b240d8d23"} Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.987374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:50 crc kubenswrapper[5010]: I1126 15:38:50.999112 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.011786 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-579bb66b4c-ghjg5" podStartSLOduration=4.011769231 podStartE2EDuration="4.011769231s" podCreationTimestamp="2025-11-26 15:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:38:51.009267069 +0000 UTC m=+751.799984217" watchObservedRunningTime="2025-11-26 15:38:51.011769231 +0000 UTC m=+751.802486389" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.650227 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf"] Nov 26 15:38:51 crc kubenswrapper[5010]: E1126 15:38:51.650569 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19360de4-bc38-43e1-8b9a-8422b0213697" containerName="route-controller-manager" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.650586 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="19360de4-bc38-43e1-8b9a-8422b0213697" containerName="route-controller-manager" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.650724 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="19360de4-bc38-43e1-8b9a-8422b0213697" containerName="route-controller-manager" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.651279 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.653650 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.654165 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.654439 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.655066 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.655136 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.655978 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.669093 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf"] Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.726775 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9561320-14a7-48b9-a967-e31cf86990d2-serving-cert\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.726988 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7w2z8\" (UniqueName: \"kubernetes.io/projected/f9561320-14a7-48b9-a967-e31cf86990d2-kube-api-access-7w2z8\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.727357 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9561320-14a7-48b9-a967-e31cf86990d2-config\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.727655 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9561320-14a7-48b9-a967-e31cf86990d2-client-ca\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.829076 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7w2z8\" (UniqueName: \"kubernetes.io/projected/f9561320-14a7-48b9-a967-e31cf86990d2-kube-api-access-7w2z8\") pod 
\"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.829186 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9561320-14a7-48b9-a967-e31cf86990d2-config\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.829277 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9561320-14a7-48b9-a967-e31cf86990d2-client-ca\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.829337 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9561320-14a7-48b9-a967-e31cf86990d2-serving-cert\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.831506 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f9561320-14a7-48b9-a967-e31cf86990d2-client-ca\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.832640 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9561320-14a7-48b9-a967-e31cf86990d2-config\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.842162 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f9561320-14a7-48b9-a967-e31cf86990d2-serving-cert\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.854508 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7w2z8\" (UniqueName: \"kubernetes.io/projected/f9561320-14a7-48b9-a967-e31cf86990d2-kube-api-access-7w2z8\") pod \"route-controller-manager-55468755b8-mpwpf\" (UID: \"f9561320-14a7-48b9-a967-e31cf86990d2\") " pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.902275 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19360de4-bc38-43e1-8b9a-8422b0213697" path="/var/lib/kubelet/pods/19360de4-bc38-43e1-8b9a-8422b0213697/volumes" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.903272 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="edc69b5c-4248-4f86-8602-7c31258ac2c3" path="/var/lib/kubelet/pods/edc69b5c-4248-4f86-8602-7c31258ac2c3/volumes" Nov 26 15:38:51 crc kubenswrapper[5010]: I1126 15:38:51.972578 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.368601 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.370690 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.376587 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-gsklg" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.386926 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.390837 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.391698 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.394848 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.416788 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.422411 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-cf7fq"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.423439 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437239 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-ovs-socket\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437285 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c21d70d9-5cf1-46c9-95af-510e964cfff9-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437318 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhblv\" (UniqueName: \"kubernetes.io/projected/79ffe6fa-990a-422f-ba69-151aacb5592b-kube-api-access-rhblv\") pod \"nmstate-metrics-5dcf9c57c5-8pmmg\" (UID: \"79ffe6fa-990a-422f-ba69-151aacb5592b\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437356 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-dbus-socket\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437422 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6swqs\" (UniqueName: \"kubernetes.io/projected/cd932913-4bd6-409f-bc77-688af8d29524-kube-api-access-6swqs\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437472 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ms7gq\" (UniqueName: \"kubernetes.io/projected/c21d70d9-5cf1-46c9-95af-510e964cfff9-kube-api-access-ms7gq\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.437493 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-nmstate-lock\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.451165 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539293 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6swqs\" (UniqueName: \"kubernetes.io/projected/cd932913-4bd6-409f-bc77-688af8d29524-kube-api-access-6swqs\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " 
pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539369 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ms7gq\" (UniqueName: \"kubernetes.io/projected/c21d70d9-5cf1-46c9-95af-510e964cfff9-kube-api-access-ms7gq\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539391 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-nmstate-lock\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539418 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c21d70d9-5cf1-46c9-95af-510e964cfff9-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539434 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-ovs-socket\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539453 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhblv\" (UniqueName: \"kubernetes.io/projected/79ffe6fa-990a-422f-ba69-151aacb5592b-kube-api-access-rhblv\") pod \"nmstate-metrics-5dcf9c57c5-8pmmg\" (UID: \"79ffe6fa-990a-422f-ba69-151aacb5592b\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539475 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-dbus-socket\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539814 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-dbus-socket\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539852 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-ovs-socket\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.539868 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cd932913-4bd6-409f-bc77-688af8d29524-nmstate-lock\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc 
kubenswrapper[5010]: E1126 15:38:52.539882 5010 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 26 15:38:52 crc kubenswrapper[5010]: E1126 15:38:52.539960 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c21d70d9-5cf1-46c9-95af-510e964cfff9-tls-key-pair podName:c21d70d9-5cf1-46c9-95af-510e964cfff9 nodeName:}" failed. No retries permitted until 2025-11-26 15:38:53.039938649 +0000 UTC m=+753.830655797 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/c21d70d9-5cf1-46c9-95af-510e964cfff9-tls-key-pair") pod "nmstate-webhook-6b89b748d8-qgmx6" (UID: "c21d70d9-5cf1-46c9-95af-510e964cfff9") : secret "openshift-nmstate-webhook" not found Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.565619 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.566701 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.573460 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.573503 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-qn5m9" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.573598 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.580141 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ms7gq\" (UniqueName: \"kubernetes.io/projected/c21d70d9-5cf1-46c9-95af-510e964cfff9-kube-api-access-ms7gq\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.581839 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6swqs\" (UniqueName: \"kubernetes.io/projected/cd932913-4bd6-409f-bc77-688af8d29524-kube-api-access-6swqs\") pod \"nmstate-handler-cf7fq\" (UID: \"cd932913-4bd6-409f-bc77-688af8d29524\") " pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.587928 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhblv\" (UniqueName: \"kubernetes.io/projected/79ffe6fa-990a-422f-ba69-151aacb5592b-kube-api-access-rhblv\") pod \"nmstate-metrics-5dcf9c57c5-8pmmg\" (UID: \"79ffe6fa-990a-422f-ba69-151aacb5592b\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.592606 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.640846 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 
15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.640997 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhdpt\" (UniqueName: \"kubernetes.io/projected/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-kube-api-access-vhdpt\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.641047 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.690537 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.742222 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhdpt\" (UniqueName: \"kubernetes.io/projected/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-kube-api-access-vhdpt\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.742763 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.742817 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.744159 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.748598 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.751520 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.765012 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhdpt\" (UniqueName: \"kubernetes.io/projected/5dfb6640-deaa-4758-ac2a-bd2cc1db4508-kube-api-access-vhdpt\") pod \"nmstate-console-plugin-5874bd7bc5-4cbrf\" (UID: \"5dfb6640-deaa-4758-ac2a-bd2cc1db4508\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.892616 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.904465 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-89c65d5fc-sm6pr"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.905480 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.922258 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-89c65d5fc-sm6pr"] Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948603 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvcrz\" (UniqueName: \"kubernetes.io/projected/5802b98e-2855-4736-bbf7-c879a639b96d-kube-api-access-cvcrz\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948668 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-trusted-ca-bundle\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948701 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-console-config\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948745 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5802b98e-2855-4736-bbf7-c879a639b96d-console-oauth-config\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948781 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-oauth-serving-cert\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/5802b98e-2855-4736-bbf7-c879a639b96d-console-serving-cert\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:52 crc kubenswrapper[5010]: I1126 15:38:52.948839 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-service-ca\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.015280 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-cf7fq" event={"ID":"cd932913-4bd6-409f-bc77-688af8d29524","Type":"ContainerStarted","Data":"06f479c0e04e2dcfa07f48e2337195de74d1cbfede164ca4cbd55ca88889eee4"} Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.017506 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" event={"ID":"f9561320-14a7-48b9-a967-e31cf86990d2","Type":"ContainerStarted","Data":"a1e658d61271b45d3ee478bb5d5c640d182cfa85e67c27fc6c2289bba3035db7"} Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.051943 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvcrz\" (UniqueName: \"kubernetes.io/projected/5802b98e-2855-4736-bbf7-c879a639b96d-kube-api-access-cvcrz\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052342 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-trusted-ca-bundle\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052387 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-console-config\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052413 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5802b98e-2855-4736-bbf7-c879a639b96d-console-oauth-config\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052535 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-oauth-serving-cert\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052577 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5802b98e-2855-4736-bbf7-c879a639b96d-console-serving-cert\") pod \"console-89c65d5fc-sm6pr\" 
(UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052620 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-service-ca\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.052655 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c21d70d9-5cf1-46c9-95af-510e964cfff9-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.053729 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-oauth-serving-cert\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.053807 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-service-ca\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.053964 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-console-config\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.054214 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5802b98e-2855-4736-bbf7-c879a639b96d-trusted-ca-bundle\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.067587 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/5802b98e-2855-4736-bbf7-c879a639b96d-console-oauth-config\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.067601 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/5802b98e-2855-4736-bbf7-c879a639b96d-console-serving-cert\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.070795 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/c21d70d9-5cf1-46c9-95af-510e964cfff9-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-qgmx6\" (UID: \"c21d70d9-5cf1-46c9-95af-510e964cfff9\") " 
pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.075758 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvcrz\" (UniqueName: \"kubernetes.io/projected/5802b98e-2855-4736-bbf7-c879a639b96d-kube-api-access-cvcrz\") pod \"console-89c65d5fc-sm6pr\" (UID: \"5802b98e-2855-4736-bbf7-c879a639b96d\") " pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.195055 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg"] Nov 26 15:38:53 crc kubenswrapper[5010]: W1126 15:38:53.215099 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79ffe6fa_990a_422f_ba69_151aacb5592b.slice/crio-b8f27b96eb7e143379d54fbbc9439f68af9d6d9a39886d0c5361f0847723fca9 WatchSource:0}: Error finding container b8f27b96eb7e143379d54fbbc9439f68af9d6d9a39886d0c5361f0847723fca9: Status 404 returned error can't find the container with id b8f27b96eb7e143379d54fbbc9439f68af9d6d9a39886d0c5361f0847723fca9 Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.267397 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.309546 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.358986 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf"] Nov 26 15:38:53 crc kubenswrapper[5010]: W1126 15:38:53.396047 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5dfb6640_deaa_4758_ac2a_bd2cc1db4508.slice/crio-d5e289f5b7195e468b8f5487f7c9927965c4c4c54d796ca5d50caa225912e373 WatchSource:0}: Error finding container d5e289f5b7195e468b8f5487f7c9927965c4c4c54d796ca5d50caa225912e373: Status 404 returned error can't find the container with id d5e289f5b7195e468b8f5487f7c9927965c4c4c54d796ca5d50caa225912e373 Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.720893 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-89c65d5fc-sm6pr"] Nov 26 15:38:53 crc kubenswrapper[5010]: W1126 15:38:53.728091 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5802b98e_2855_4736_bbf7_c879a639b96d.slice/crio-a047538969897e0ff44bcca76f40e171b017d9b00711bff37bfebcee3303064f WatchSource:0}: Error finding container a047538969897e0ff44bcca76f40e171b017d9b00711bff37bfebcee3303064f: Status 404 returned error can't find the container with id a047538969897e0ff44bcca76f40e171b017d9b00711bff37bfebcee3303064f Nov 26 15:38:53 crc kubenswrapper[5010]: I1126 15:38:53.790490 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6"] Nov 26 15:38:53 crc kubenswrapper[5010]: W1126 15:38:53.794254 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc21d70d9_5cf1_46c9_95af_510e964cfff9.slice/crio-915ce08d91ffeafc52610c057ce7a320a041c37bdb7437e773de712d64df40ed WatchSource:0}: Error finding container 
915ce08d91ffeafc52610c057ce7a320a041c37bdb7437e773de712d64df40ed: Status 404 returned error can't find the container with id 915ce08d91ffeafc52610c057ce7a320a041c37bdb7437e773de712d64df40ed Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.022917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-89c65d5fc-sm6pr" event={"ID":"5802b98e-2855-4736-bbf7-c879a639b96d","Type":"ContainerStarted","Data":"a047538969897e0ff44bcca76f40e171b017d9b00711bff37bfebcee3303064f"} Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.025034 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" event={"ID":"c21d70d9-5cf1-46c9-95af-510e964cfff9","Type":"ContainerStarted","Data":"915ce08d91ffeafc52610c057ce7a320a041c37bdb7437e773de712d64df40ed"} Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.027224 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" event={"ID":"79ffe6fa-990a-422f-ba69-151aacb5592b","Type":"ContainerStarted","Data":"b8f27b96eb7e143379d54fbbc9439f68af9d6d9a39886d0c5361f0847723fca9"} Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.029745 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" event={"ID":"f9561320-14a7-48b9-a967-e31cf86990d2","Type":"ContainerStarted","Data":"d4fe8a19d092dda63a27f7f8abc4e4465ef6ff58888ff41652e78775e84249fe"} Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.030046 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.031339 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" event={"ID":"5dfb6640-deaa-4758-ac2a-bd2cc1db4508","Type":"ContainerStarted","Data":"d5e289f5b7195e468b8f5487f7c9927965c4c4c54d796ca5d50caa225912e373"} Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.038526 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" Nov 26 15:38:54 crc kubenswrapper[5010]: I1126 15:38:54.060099 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-55468755b8-mpwpf" podStartSLOduration=7.060074067 podStartE2EDuration="7.060074067s" podCreationTimestamp="2025-11-26 15:38:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:38:54.054919799 +0000 UTC m=+754.845636957" watchObservedRunningTime="2025-11-26 15:38:54.060074067 +0000 UTC m=+754.850791225" Nov 26 15:38:57 crc kubenswrapper[5010]: I1126 15:38:57.060647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-89c65d5fc-sm6pr" event={"ID":"5802b98e-2855-4736-bbf7-c879a639b96d","Type":"ContainerStarted","Data":"937bde3d4cc26ef189b8a8238704d4633b16d860416e9c20c86bf48f681ecbf5"} Nov 26 15:38:57 crc kubenswrapper[5010]: I1126 15:38:57.096612 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-89c65d5fc-sm6pr" podStartSLOduration=5.096584329 podStartE2EDuration="5.096584329s" podCreationTimestamp="2025-11-26 15:38:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:38:57.089305738 +0000 UTC m=+757.880022926" watchObservedRunningTime="2025-11-26 15:38:57.096584329 +0000 UTC m=+757.887301557" Nov 26 15:38:57 crc kubenswrapper[5010]: I1126 15:38:57.671228 5010 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 15:39:03 crc kubenswrapper[5010]: I1126 15:39:03.267894 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:39:03 crc kubenswrapper[5010]: I1126 15:39:03.269117 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:39:03 crc kubenswrapper[5010]: I1126 15:39:03.276621 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:39:04 crc kubenswrapper[5010]: I1126 15:39:04.174540 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-89c65d5fc-sm6pr" Nov 26 15:39:04 crc kubenswrapper[5010]: I1126 15:39:04.246945 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-rh2vd"] Nov 26 15:39:09 crc kubenswrapper[5010]: E1126 15:39:09.987603 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8b42f29676503074095f2837b044f2e228eaff3b25ab9a4c7c6165cb5d4c6892" Nov 26 15:39:09 crc kubenswrapper[5010]: E1126 15:39:09.989229 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:nmstate-metrics,Image:registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8b42f29676503074095f2837b044f2e228eaff3b25ab9a4c7c6165cb5d4c6892,Command:[manager],Args:[--zap-time-encoding=iso8601],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:WATCH_NAMESPACE,Value:,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:POD_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:RUN_METRICS_MANAGER,Value:,ValueFrom:nil,},EnvVar{Name:OPERATOR_NAME,Value:nmstate,ValueFrom:nil,},EnvVar{Name:ENABLE_PROFILER,Value:False,ValueFrom:nil,},EnvVar{Name:PROFILER_PORT,Value:6060,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{30 -3} {} 30m DecimalSI},memory: {{20971520 0} {} 20Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rhblv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000680000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nmstate-metrics-5dcf9c57c5-8pmmg_openshift-nmstate(79ffe6fa-990a-422f-ba69-151aacb5592b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:39:11 crc kubenswrapper[5010]: I1126 15:39:11.225206 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-cf7fq" event={"ID":"cd932913-4bd6-409f-bc77-688af8d29524","Type":"ContainerStarted","Data":"f90078ea9a8f40fc16c38467faac479788235bd693593200b8776e610cba4c78"} Nov 26 15:39:11 crc kubenswrapper[5010]: I1126 15:39:11.226082 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:39:11 crc kubenswrapper[5010]: I1126 15:39:11.249390 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-cf7fq" podStartSLOduration=1.663428048 podStartE2EDuration="19.249373266s" podCreationTimestamp="2025-11-26 15:38:52 +0000 UTC" firstStartedPulling="2025-11-26 15:38:52.823036401 +0000 UTC m=+753.613753559" lastFinishedPulling="2025-11-26 15:39:10.408981599 +0000 UTC m=+771.199698777" observedRunningTime="2025-11-26 15:39:11.24509764 +0000 UTC m=+772.035814788" watchObservedRunningTime="2025-11-26 15:39:11.249373266 +0000 UTC m=+772.040090414" Nov 26 15:39:12 crc kubenswrapper[5010]: I1126 15:39:12.234034 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" event={"ID":"5dfb6640-deaa-4758-ac2a-bd2cc1db4508","Type":"ContainerStarted","Data":"1446d172ae0d88ab36eecbf1c3120aeed9ee1b8b29b432b41fd54aca8487793e"} Nov 26 15:39:12 crc kubenswrapper[5010]: I1126 15:39:12.238288 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" event={"ID":"c21d70d9-5cf1-46c9-95af-510e964cfff9","Type":"ContainerStarted","Data":"63a9a952bd149bbf73c3575ca5640eb2670a8ab9f103dfdfa2abb22c75b7b003"} Nov 26 15:39:12 crc kubenswrapper[5010]: I1126 15:39:12.257206 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-4cbrf" podStartSLOduration=2.53112294 podStartE2EDuration="20.257179404s" podCreationTimestamp="2025-11-26 15:38:52 +0000 UTC" firstStartedPulling="2025-11-26 15:38:53.405901739 +0000 UTC m=+754.196618887" lastFinishedPulling="2025-11-26 15:39:11.131958183 +0000 UTC m=+771.922675351" observedRunningTime="2025-11-26 15:39:12.254329284 +0000 UTC m=+773.045046452" watchObservedRunningTime="2025-11-26 15:39:12.257179404 +0000 UTC m=+773.047896562" Nov 26 
15:39:12 crc kubenswrapper[5010]: I1126 15:39:12.281291 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" podStartSLOduration=3.638283265 podStartE2EDuration="20.281268282s" podCreationTimestamp="2025-11-26 15:38:52 +0000 UTC" firstStartedPulling="2025-11-26 15:38:53.79979015 +0000 UTC m=+754.590507298" lastFinishedPulling="2025-11-26 15:39:10.442775167 +0000 UTC m=+771.233492315" observedRunningTime="2025-11-26 15:39:12.276933834 +0000 UTC m=+773.067651092" watchObservedRunningTime="2025-11-26 15:39:12.281268282 +0000 UTC m=+773.071985440" Nov 26 15:39:13 crc kubenswrapper[5010]: I1126 15:39:13.244303 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:39:15 crc kubenswrapper[5010]: E1126 15:39:15.328214 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-metrics\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" podUID="79ffe6fa-990a-422f-ba69-151aacb5592b" Nov 26 15:39:16 crc kubenswrapper[5010]: I1126 15:39:16.270206 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" event={"ID":"79ffe6fa-990a-422f-ba69-151aacb5592b","Type":"ContainerStarted","Data":"54664107cd7140cabb8f4d6751a7791b3573fc7d043a5e4d02242bcf74d742e5"} Nov 26 15:39:16 crc kubenswrapper[5010]: E1126 15:39:16.275957 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8b42f29676503074095f2837b044f2e228eaff3b25ab9a4c7c6165cb5d4c6892\\\"\"" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" podUID="79ffe6fa-990a-422f-ba69-151aacb5592b" Nov 26 15:39:17 crc kubenswrapper[5010]: E1126 15:39:17.279144 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"nmstate-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/openshift4/ose-kubernetes-nmstate-handler-rhel9@sha256:8b42f29676503074095f2837b044f2e228eaff3b25ab9a4c7c6165cb5d4c6892\\\"\"" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" podUID="79ffe6fa-990a-422f-ba69-151aacb5592b" Nov 26 15:39:17 crc kubenswrapper[5010]: I1126 15:39:17.779820 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-cf7fq" Nov 26 15:39:23 crc kubenswrapper[5010]: I1126 15:39:23.316523 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-qgmx6" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.612597 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-s97kq"] Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.614284 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.626272 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s97kq"] Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.816343 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-catalog-content\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.816413 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bqgl\" (UniqueName: \"kubernetes.io/projected/c2c88156-4ea0-4113-a344-5550f5ed016c-kube-api-access-8bqgl\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.816521 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-utilities\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.917541 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-catalog-content\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.917590 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bqgl\" (UniqueName: \"kubernetes.io/projected/c2c88156-4ea0-4113-a344-5550f5ed016c-kube-api-access-8bqgl\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.917662 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-utilities\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.918419 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-utilities\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.918442 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-catalog-content\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:25 crc kubenswrapper[5010]: I1126 15:39:25.943865 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-8bqgl\" (UniqueName: \"kubernetes.io/projected/c2c88156-4ea0-4113-a344-5550f5ed016c-kube-api-access-8bqgl\") pod \"redhat-operators-s97kq\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:26 crc kubenswrapper[5010]: I1126 15:39:26.243822 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:26 crc kubenswrapper[5010]: I1126 15:39:26.708295 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-s97kq"] Nov 26 15:39:27 crc kubenswrapper[5010]: I1126 15:39:27.351115 5010 generic.go:334] "Generic (PLEG): container finished" podID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerID="0dd1a7485e0b0ed84c51bb00442e562956f58ec762647401273bada9f860dd3a" exitCode=0 Nov 26 15:39:27 crc kubenswrapper[5010]: I1126 15:39:27.351252 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s97kq" event={"ID":"c2c88156-4ea0-4113-a344-5550f5ed016c","Type":"ContainerDied","Data":"0dd1a7485e0b0ed84c51bb00442e562956f58ec762647401273bada9f860dd3a"} Nov 26 15:39:27 crc kubenswrapper[5010]: I1126 15:39:27.351455 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s97kq" event={"ID":"c2c88156-4ea0-4113-a344-5550f5ed016c","Type":"ContainerStarted","Data":"940bff1bae5bfe7384b4f1cba1d374b3fa7f0944de705116474dfd137a7c47a9"} Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.324348 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-rh2vd" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerName="console" containerID="cri-o://8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b" gracePeriod=15 Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.367978 5010 generic.go:334] "Generic (PLEG): container finished" podID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerID="c2a35892adfe4f80ebf933c151509763e2a4f0a6f47e1d0c7be782236fff85b6" exitCode=0 Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.368032 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s97kq" event={"ID":"c2c88156-4ea0-4113-a344-5550f5ed016c","Type":"ContainerDied","Data":"c2a35892adfe4f80ebf933c151509763e2a4f0a6f47e1d0c7be782236fff85b6"} Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.802997 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-rh2vd_f0d44623-c021-45d4-bc90-b40247ec17ef/console/0.log" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.803090 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.982701 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-oauth-config\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.982819 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lm8gv\" (UniqueName: \"kubernetes.io/projected/f0d44623-c021-45d4-bc90-b40247ec17ef-kube-api-access-lm8gv\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.982853 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-oauth-serving-cert\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.982919 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-trusted-ca-bundle\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.982946 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-console-config\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.982966 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-serving-cert\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.983046 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-service-ca\") pod \"f0d44623-c021-45d4-bc90-b40247ec17ef\" (UID: \"f0d44623-c021-45d4-bc90-b40247ec17ef\") " Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.983596 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.983617 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-console-config" (OuterVolumeSpecName: "console-config") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.983625 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-service-ca" (OuterVolumeSpecName: "service-ca") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.983781 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.988153 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.988333 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0d44623-c021-45d4-bc90-b40247ec17ef-kube-api-access-lm8gv" (OuterVolumeSpecName: "kube-api-access-lm8gv") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "kube-api-access-lm8gv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:39:29 crc kubenswrapper[5010]: I1126 15:39:29.990486 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "f0d44623-c021-45d4-bc90-b40247ec17ef" (UID: "f0d44623-c021-45d4-bc90-b40247ec17ef"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.083985 5010 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.084013 5010 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.084025 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lm8gv\" (UniqueName: \"kubernetes.io/projected/f0d44623-c021-45d4-bc90-b40247ec17ef-kube-api-access-lm8gv\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.084033 5010 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.084043 5010 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.084052 5010 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f0d44623-c021-45d4-bc90-b40247ec17ef-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.084060 5010 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f0d44623-c021-45d4-bc90-b40247ec17ef-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.375877 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-rh2vd_f0d44623-c021-45d4-bc90-b40247ec17ef/console/0.log" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.375932 5010 generic.go:334] "Generic (PLEG): container finished" podID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerID="8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b" exitCode=2 Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.375967 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rh2vd" event={"ID":"f0d44623-c021-45d4-bc90-b40247ec17ef","Type":"ContainerDied","Data":"8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b"} Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.376180 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-rh2vd" event={"ID":"f0d44623-c021-45d4-bc90-b40247ec17ef","Type":"ContainerDied","Data":"d2bf51c833c400124d6e5f186a8f683141613e09416f8a2633afd9bd4c85fce8"} Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.376206 5010 scope.go:117] "RemoveContainer" containerID="8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.376357 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-rh2vd" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.401381 5010 scope.go:117] "RemoveContainer" containerID="8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b" Nov 26 15:39:30 crc kubenswrapper[5010]: E1126 15:39:30.401685 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b\": container with ID starting with 8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b not found: ID does not exist" containerID="8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.401733 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b"} err="failed to get container status \"8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b\": rpc error: code = NotFound desc = could not find container \"8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b\": container with ID starting with 8b5918b348f2e2c54d5bfe34d24623e600ee489ede7d0f9f688f161a0a3c5b1b not found: ID does not exist" Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.405731 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-rh2vd"] Nov 26 15:39:30 crc kubenswrapper[5010]: I1126 15:39:30.409241 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-rh2vd"] Nov 26 15:39:31 crc kubenswrapper[5010]: I1126 15:39:31.388251 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s97kq" event={"ID":"c2c88156-4ea0-4113-a344-5550f5ed016c","Type":"ContainerStarted","Data":"2cb3a598cde7f94cbbdc0c86c3dfd12ec79edd1ec515acfee9445c14216e50b3"} Nov 26 15:39:31 crc kubenswrapper[5010]: I1126 15:39:31.393383 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" event={"ID":"79ffe6fa-990a-422f-ba69-151aacb5592b","Type":"ContainerStarted","Data":"a8e6d0d4a7448b8e1b152ea59539ba8ae74186a20699fc1f78565b61e2e02884"} Nov 26 15:39:31 crc kubenswrapper[5010]: I1126 15:39:31.416242 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-s97kq" podStartSLOduration=3.655462043 podStartE2EDuration="6.416188244s" podCreationTimestamp="2025-11-26 15:39:25 +0000 UTC" firstStartedPulling="2025-11-26 15:39:27.353890337 +0000 UTC m=+788.144607485" lastFinishedPulling="2025-11-26 15:39:30.114616538 +0000 UTC m=+790.905333686" observedRunningTime="2025-11-26 15:39:31.414444911 +0000 UTC m=+792.205162169" watchObservedRunningTime="2025-11-26 15:39:31.416188244 +0000 UTC m=+792.206905392" Nov 26 15:39:31 crc kubenswrapper[5010]: I1126 15:39:31.440881 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-8pmmg" podStartSLOduration=2.5416606120000003 podStartE2EDuration="39.440860176s" podCreationTimestamp="2025-11-26 15:38:52 +0000 UTC" firstStartedPulling="2025-11-26 15:38:53.216950242 +0000 UTC m=+754.007667380" lastFinishedPulling="2025-11-26 15:39:30.116149796 +0000 UTC m=+790.906866944" observedRunningTime="2025-11-26 15:39:31.436502668 +0000 UTC m=+792.227219836" watchObservedRunningTime="2025-11-26 15:39:31.440860176 +0000 UTC 
m=+792.231577334" Nov 26 15:39:31 crc kubenswrapper[5010]: I1126 15:39:31.903936 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" path="/var/lib/kubelet/pods/f0d44623-c021-45d4-bc90-b40247ec17ef/volumes" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.244242 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.244784 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.418064 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l6rqp"] Nov 26 15:39:36 crc kubenswrapper[5010]: E1126 15:39:36.418325 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerName="console" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.418340 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerName="console" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.418489 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0d44623-c021-45d4-bc90-b40247ec17ef" containerName="console" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.419380 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.454741 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l6rqp"] Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.593783 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzwh4\" (UniqueName: \"kubernetes.io/projected/4dc6e8d8-57dd-48ec-824d-6bcbabded070-kube-api-access-kzwh4\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.594508 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-utilities\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.594606 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-catalog-content\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.696552 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-catalog-content\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.696617 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-kzwh4\" (UniqueName: \"kubernetes.io/projected/4dc6e8d8-57dd-48ec-824d-6bcbabded070-kube-api-access-kzwh4\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.696681 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-utilities\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.697540 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-utilities\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.697793 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-catalog-content\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.722016 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzwh4\" (UniqueName: \"kubernetes.io/projected/4dc6e8d8-57dd-48ec-824d-6bcbabded070-kube-api-access-kzwh4\") pod \"community-operators-l6rqp\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:36 crc kubenswrapper[5010]: I1126 15:39:36.766089 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:37 crc kubenswrapper[5010]: I1126 15:39:37.015990 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l6rqp"] Nov 26 15:39:37 crc kubenswrapper[5010]: I1126 15:39:37.295800 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-s97kq" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="registry-server" probeResult="failure" output=< Nov 26 15:39:37 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 15:39:37 crc kubenswrapper[5010]: > Nov 26 15:39:37 crc kubenswrapper[5010]: I1126 15:39:37.454580 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerStarted","Data":"4317d5b6b062af4f094f242e73ebd81cf4ca44ad12d4199f0dc0cb96c74accd6"} Nov 26 15:39:38 crc kubenswrapper[5010]: I1126 15:39:38.461921 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerStarted","Data":"15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11"} Nov 26 15:39:39 crc kubenswrapper[5010]: I1126 15:39:39.473476 5010 generic.go:334] "Generic (PLEG): container finished" podID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerID="15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11" exitCode=0 Nov 26 15:39:39 crc kubenswrapper[5010]: I1126 15:39:39.474011 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerDied","Data":"15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11"} Nov 26 15:39:40 crc kubenswrapper[5010]: I1126 15:39:40.485241 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerStarted","Data":"fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d"} Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.150697 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-65s2m"] Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.154302 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.155942 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-65s2m"] Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.177518 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cvmw\" (UniqueName: \"kubernetes.io/projected/96c01e8f-6078-41e9-be76-022c3aa2b3ae-kube-api-access-6cvmw\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.178252 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-catalog-content\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.178383 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-utilities\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.280123 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-utilities\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.280192 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cvmw\" (UniqueName: \"kubernetes.io/projected/96c01e8f-6078-41e9-be76-022c3aa2b3ae-kube-api-access-6cvmw\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.280224 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-catalog-content\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.281282 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-utilities\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.281293 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-catalog-content\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.323562 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-6cvmw\" (UniqueName: \"kubernetes.io/projected/96c01e8f-6078-41e9-be76-022c3aa2b3ae-kube-api-access-6cvmw\") pod \"redhat-marketplace-65s2m\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.484384 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.496168 5010 generic.go:334] "Generic (PLEG): container finished" podID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerID="fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d" exitCode=0 Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.496224 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerDied","Data":"fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d"} Nov 26 15:39:41 crc kubenswrapper[5010]: I1126 15:39:41.740554 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-65s2m"] Nov 26 15:39:42 crc kubenswrapper[5010]: I1126 15:39:42.505786 5010 generic.go:334] "Generic (PLEG): container finished" podID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerID="90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc" exitCode=0 Nov 26 15:39:42 crc kubenswrapper[5010]: I1126 15:39:42.505949 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65s2m" event={"ID":"96c01e8f-6078-41e9-be76-022c3aa2b3ae","Type":"ContainerDied","Data":"90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc"} Nov 26 15:39:42 crc kubenswrapper[5010]: I1126 15:39:42.506768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65s2m" event={"ID":"96c01e8f-6078-41e9-be76-022c3aa2b3ae","Type":"ContainerStarted","Data":"e90ea06a365ae8218eca974cdb37e5cd3c1b61f5cdb4fe48ee108ffc323d2090"} Nov 26 15:39:42 crc kubenswrapper[5010]: I1126 15:39:42.512838 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerStarted","Data":"43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219"} Nov 26 15:39:42 crc kubenswrapper[5010]: I1126 15:39:42.561656 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l6rqp" podStartSLOduration=3.916747745 podStartE2EDuration="6.561629382s" podCreationTimestamp="2025-11-26 15:39:36 +0000 UTC" firstStartedPulling="2025-11-26 15:39:39.477069989 +0000 UTC m=+800.267787177" lastFinishedPulling="2025-11-26 15:39:42.121951666 +0000 UTC m=+802.912668814" observedRunningTime="2025-11-26 15:39:42.560765361 +0000 UTC m=+803.351482599" watchObservedRunningTime="2025-11-26 15:39:42.561629382 +0000 UTC m=+803.352346570" Nov 26 15:39:44 crc kubenswrapper[5010]: I1126 15:39:44.526761 5010 generic.go:334] "Generic (PLEG): container finished" podID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerID="65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201" exitCode=0 Nov 26 15:39:44 crc kubenswrapper[5010]: I1126 15:39:44.526886 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65s2m" 
event={"ID":"96c01e8f-6078-41e9-be76-022c3aa2b3ae","Type":"ContainerDied","Data":"65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201"} Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.317802 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.389052 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.543854 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65s2m" event={"ID":"96c01e8f-6078-41e9-be76-022c3aa2b3ae","Type":"ContainerStarted","Data":"1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1"} Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.567467 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-65s2m" podStartSLOduration=2.638239198 podStartE2EDuration="5.567448539s" podCreationTimestamp="2025-11-26 15:39:41 +0000 UTC" firstStartedPulling="2025-11-26 15:39:42.509426887 +0000 UTC m=+803.300144045" lastFinishedPulling="2025-11-26 15:39:45.438636228 +0000 UTC m=+806.229353386" observedRunningTime="2025-11-26 15:39:46.5654803 +0000 UTC m=+807.356197458" watchObservedRunningTime="2025-11-26 15:39:46.567448539 +0000 UTC m=+807.358165687" Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.767691 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.767791 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:46 crc kubenswrapper[5010]: I1126 15:39:46.839206 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:47 crc kubenswrapper[5010]: I1126 15:39:47.620007 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:49 crc kubenswrapper[5010]: I1126 15:39:49.805431 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s97kq"] Nov 26 15:39:49 crc kubenswrapper[5010]: I1126 15:39:49.805821 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-s97kq" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="registry-server" containerID="cri-o://2cb3a598cde7f94cbbdc0c86c3dfd12ec79edd1ec515acfee9445c14216e50b3" gracePeriod=2 Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.576891 5010 generic.go:334] "Generic (PLEG): container finished" podID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerID="2cb3a598cde7f94cbbdc0c86c3dfd12ec79edd1ec515acfee9445c14216e50b3" exitCode=0 Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.577508 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s97kq" event={"ID":"c2c88156-4ea0-4113-a344-5550f5ed016c","Type":"ContainerDied","Data":"2cb3a598cde7f94cbbdc0c86c3dfd12ec79edd1ec515acfee9445c14216e50b3"} Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.693803 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.827652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-catalog-content\") pod \"c2c88156-4ea0-4113-a344-5550f5ed016c\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.827751 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-utilities\") pod \"c2c88156-4ea0-4113-a344-5550f5ed016c\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.827832 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bqgl\" (UniqueName: \"kubernetes.io/projected/c2c88156-4ea0-4113-a344-5550f5ed016c-kube-api-access-8bqgl\") pod \"c2c88156-4ea0-4113-a344-5550f5ed016c\" (UID: \"c2c88156-4ea0-4113-a344-5550f5ed016c\") " Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.829764 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-utilities" (OuterVolumeSpecName: "utilities") pod "c2c88156-4ea0-4113-a344-5550f5ed016c" (UID: "c2c88156-4ea0-4113-a344-5550f5ed016c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.835000 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2c88156-4ea0-4113-a344-5550f5ed016c-kube-api-access-8bqgl" (OuterVolumeSpecName: "kube-api-access-8bqgl") pod "c2c88156-4ea0-4113-a344-5550f5ed016c" (UID: "c2c88156-4ea0-4113-a344-5550f5ed016c"). InnerVolumeSpecName "kube-api-access-8bqgl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.929456 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.929797 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bqgl\" (UniqueName: \"kubernetes.io/projected/c2c88156-4ea0-4113-a344-5550f5ed016c-kube-api-access-8bqgl\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:50 crc kubenswrapper[5010]: I1126 15:39:50.936885 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c2c88156-4ea0-4113-a344-5550f5ed016c" (UID: "c2c88156-4ea0-4113-a344-5550f5ed016c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.033823 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c2c88156-4ea0-4113-a344-5550f5ed016c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.484744 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.484815 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.535312 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.588697 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-s97kq" event={"ID":"c2c88156-4ea0-4113-a344-5550f5ed016c","Type":"ContainerDied","Data":"940bff1bae5bfe7384b4f1cba1d374b3fa7f0944de705116474dfd137a7c47a9"} Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.588738 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-s97kq" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.589056 5010 scope.go:117] "RemoveContainer" containerID="2cb3a598cde7f94cbbdc0c86c3dfd12ec79edd1ec515acfee9445c14216e50b3" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.619025 5010 scope.go:117] "RemoveContainer" containerID="c2a35892adfe4f80ebf933c151509763e2a4f0a6f47e1d0c7be782236fff85b6" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.636937 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-s97kq"] Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.643608 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-s97kq"] Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.657361 5010 scope.go:117] "RemoveContainer" containerID="0dd1a7485e0b0ed84c51bb00442e562956f58ec762647401273bada9f860dd3a" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.667413 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:51 crc kubenswrapper[5010]: I1126 15:39:51.902881 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" path="/var/lib/kubelet/pods/c2c88156-4ea0-4113-a344-5550f5ed016c/volumes" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.006673 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l6rqp"] Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.007169 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l6rqp" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="registry-server" containerID="cri-o://43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219" gracePeriod=2 Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.407902 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-65s2m"] Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.445878 5010 util.go:48] "No ready sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.607854 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzwh4\" (UniqueName: \"kubernetes.io/projected/4dc6e8d8-57dd-48ec-824d-6bcbabded070-kube-api-access-kzwh4\") pod \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.608019 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-catalog-content\") pod \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.608118 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-utilities\") pod \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\" (UID: \"4dc6e8d8-57dd-48ec-824d-6bcbabded070\") " Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.610110 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-utilities" (OuterVolumeSpecName: "utilities") pod "4dc6e8d8-57dd-48ec-824d-6bcbabded070" (UID: "4dc6e8d8-57dd-48ec-824d-6bcbabded070"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.610692 5010 generic.go:334] "Generic (PLEG): container finished" podID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerID="43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219" exitCode=0 Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.610900 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-l6rqp" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.610880 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerDied","Data":"43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219"} Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.611057 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l6rqp" event={"ID":"4dc6e8d8-57dd-48ec-824d-6bcbabded070","Type":"ContainerDied","Data":"4317d5b6b062af4f094f242e73ebd81cf4ca44ad12d4199f0dc0cb96c74accd6"} Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.611079 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-65s2m" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="registry-server" containerID="cri-o://1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1" gracePeriod=2 Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.611117 5010 scope.go:117] "RemoveContainer" containerID="43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.617358 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dc6e8d8-57dd-48ec-824d-6bcbabded070-kube-api-access-kzwh4" (OuterVolumeSpecName: "kube-api-access-kzwh4") pod "4dc6e8d8-57dd-48ec-824d-6bcbabded070" (UID: "4dc6e8d8-57dd-48ec-824d-6bcbabded070"). InnerVolumeSpecName "kube-api-access-kzwh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.669514 5010 scope.go:117] "RemoveContainer" containerID="fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.710617 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.710674 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzwh4\" (UniqueName: \"kubernetes.io/projected/4dc6e8d8-57dd-48ec-824d-6bcbabded070-kube-api-access-kzwh4\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.717221 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4dc6e8d8-57dd-48ec-824d-6bcbabded070" (UID: "4dc6e8d8-57dd-48ec-824d-6bcbabded070"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.785856 5010 scope.go:117] "RemoveContainer" containerID="15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.813020 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4dc6e8d8-57dd-48ec-824d-6bcbabded070-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.813031 5010 scope.go:117] "RemoveContainer" containerID="43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219" Nov 26 15:39:53 crc kubenswrapper[5010]: E1126 15:39:53.813860 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219\": container with ID starting with 43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219 not found: ID does not exist" containerID="43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.813925 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219"} err="failed to get container status \"43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219\": rpc error: code = NotFound desc = could not find container \"43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219\": container with ID starting with 43d19518ea37660705bf000fcdd51f53d9bf4eda54562d237a5988206cc98219 not found: ID does not exist" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.813963 5010 scope.go:117] "RemoveContainer" containerID="fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d" Nov 26 15:39:53 crc kubenswrapper[5010]: E1126 15:39:53.814670 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d\": container with ID starting with fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d not found: ID does not exist" containerID="fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.814771 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d"} err="failed to get container status \"fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d\": rpc error: code = NotFound desc = could not find container \"fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d\": container with ID starting with fee2cf5c2d1c756b6653245a3af47691f3a2f355a473ca63938aac9e4e41f44d not found: ID does not exist" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.814824 5010 scope.go:117] "RemoveContainer" containerID="15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11" Nov 26 15:39:53 crc kubenswrapper[5010]: E1126 15:39:53.815336 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11\": container with ID starting with 15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11 not found: ID does not exist" 
containerID="15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.815413 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11"} err="failed to get container status \"15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11\": rpc error: code = NotFound desc = could not find container \"15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11\": container with ID starting with 15eea648e1aa4833734f4b694bf1151f1fc35cfff0f74f7020415147794d4f11 not found: ID does not exist" Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.946967 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l6rqp"] Nov 26 15:39:53 crc kubenswrapper[5010]: I1126 15:39:53.954553 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l6rqp"] Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070341 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s"] Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.070680 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="registry-server" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070700 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="registry-server" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.070737 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="extract-utilities" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070746 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="extract-utilities" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.070760 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="registry-server" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070769 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="registry-server" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.070784 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="extract-content" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070793 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="extract-content" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.070804 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="extract-utilities" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070813 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="extract-utilities" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.070827 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="extract-content" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070837 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="extract-content" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070975 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2c88156-4ea0-4113-a344-5550f5ed016c" containerName="registry-server" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.070995 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" containerName="registry-server" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.072040 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.079375 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.095837 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s"] Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.109390 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.218546 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-catalog-content\") pod \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.219088 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-utilities\") pod \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.219281 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cvmw\" (UniqueName: \"kubernetes.io/projected/96c01e8f-6078-41e9-be76-022c3aa2b3ae-kube-api-access-6cvmw\") pod \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\" (UID: \"96c01e8f-6078-41e9-be76-022c3aa2b3ae\") " Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.219521 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7djl\" (UniqueName: \"kubernetes.io/projected/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-kube-api-access-d7djl\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.219610 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.219701 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.220100 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-utilities" (OuterVolumeSpecName: "utilities") pod "96c01e8f-6078-41e9-be76-022c3aa2b3ae" (UID: "96c01e8f-6078-41e9-be76-022c3aa2b3ae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.226933 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96c01e8f-6078-41e9-be76-022c3aa2b3ae-kube-api-access-6cvmw" (OuterVolumeSpecName: "kube-api-access-6cvmw") pod "96c01e8f-6078-41e9-be76-022c3aa2b3ae" (UID: "96c01e8f-6078-41e9-be76-022c3aa2b3ae"). InnerVolumeSpecName "kube-api-access-6cvmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.238206 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "96c01e8f-6078-41e9-be76-022c3aa2b3ae" (UID: "96c01e8f-6078-41e9-be76-022c3aa2b3ae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.321610 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7djl\" (UniqueName: \"kubernetes.io/projected/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-kube-api-access-d7djl\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.321690 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.321740 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.321783 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cvmw\" (UniqueName: \"kubernetes.io/projected/96c01e8f-6078-41e9-be76-022c3aa2b3ae-kube-api-access-6cvmw\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.321797 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-catalog-content\") on node \"crc\" 
DevicePath \"\"" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.321806 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96c01e8f-6078-41e9-be76-022c3aa2b3ae-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.322363 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.322751 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.339806 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7djl\" (UniqueName: \"kubernetes.io/projected/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-kube-api-access-d7djl\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.400241 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.622776 5010 generic.go:334] "Generic (PLEG): container finished" podID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerID="1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1" exitCode=0 Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.622868 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65s2m" event={"ID":"96c01e8f-6078-41e9-be76-022c3aa2b3ae","Type":"ContainerDied","Data":"1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1"} Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.622946 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-65s2m" event={"ID":"96c01e8f-6078-41e9-be76-022c3aa2b3ae","Type":"ContainerDied","Data":"e90ea06a365ae8218eca974cdb37e5cd3c1b61f5cdb4fe48ee108ffc323d2090"} Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.622880 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-65s2m" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.622974 5010 scope.go:117] "RemoveContainer" containerID="1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.649307 5010 scope.go:117] "RemoveContainer" containerID="65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.671607 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s"] Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.682189 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-65s2m"] Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.685940 5010 scope.go:117] "RemoveContainer" containerID="90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.686373 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-65s2m"] Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.710678 5010 scope.go:117] "RemoveContainer" containerID="1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.711849 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1\": container with ID starting with 1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1 not found: ID does not exist" containerID="1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.711908 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1"} err="failed to get container status \"1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1\": rpc error: code = NotFound desc = could not find container \"1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1\": container with ID starting with 1f8b53c51ea86d73b24fddc09bd3673a405d6da54f4b1bf51e87257fb7aa3dd1 not found: ID does not exist" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.711956 5010 scope.go:117] "RemoveContainer" containerID="65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.712581 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201\": container with ID starting with 65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201 not found: ID does not exist" containerID="65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.712625 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201"} err="failed to get container status \"65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201\": rpc error: code = NotFound desc = could not find container \"65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201\": container with ID starting with 
65b6ac3522f78f6e59f595102e914bb9901fa7019e1510984b3d384f1cde8201 not found: ID does not exist" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.712652 5010 scope.go:117] "RemoveContainer" containerID="90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc" Nov 26 15:39:54 crc kubenswrapper[5010]: E1126 15:39:54.713330 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc\": container with ID starting with 90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc not found: ID does not exist" containerID="90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc" Nov 26 15:39:54 crc kubenswrapper[5010]: I1126 15:39:54.713377 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc"} err="failed to get container status \"90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc\": rpc error: code = NotFound desc = could not find container \"90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc\": container with ID starting with 90e4e0509fbfe2de759a1e4fcc9d180932d2df1e21df8d0f37508c884ec36adc not found: ID does not exist" Nov 26 15:39:55 crc kubenswrapper[5010]: I1126 15:39:55.657218 5010 generic.go:334] "Generic (PLEG): container finished" podID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerID="24d684550f58e01bb2b4c9d79155735997baa71599ddc33a5d51bf31d57412b4" exitCode=0 Nov 26 15:39:55 crc kubenswrapper[5010]: I1126 15:39:55.657299 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" event={"ID":"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712","Type":"ContainerDied","Data":"24d684550f58e01bb2b4c9d79155735997baa71599ddc33a5d51bf31d57412b4"} Nov 26 15:39:55 crc kubenswrapper[5010]: I1126 15:39:55.657340 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" event={"ID":"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712","Type":"ContainerStarted","Data":"b601d94c4b3e7c79b6093f47032bff563198c592cef7e817509d35e49f6892dd"} Nov 26 15:39:55 crc kubenswrapper[5010]: I1126 15:39:55.905847 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dc6e8d8-57dd-48ec-824d-6bcbabded070" path="/var/lib/kubelet/pods/4dc6e8d8-57dd-48ec-824d-6bcbabded070/volumes" Nov 26 15:39:55 crc kubenswrapper[5010]: I1126 15:39:55.907136 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" path="/var/lib/kubelet/pods/96c01e8f-6078-41e9-be76-022c3aa2b3ae/volumes" Nov 26 15:39:58 crc kubenswrapper[5010]: I1126 15:39:58.680069 5010 generic.go:334] "Generic (PLEG): container finished" podID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerID="a19582f0f14c36d26f17e984ba3bc058bb307c744cebff99c89ec6af7e826915" exitCode=0 Nov 26 15:39:58 crc kubenswrapper[5010]: I1126 15:39:58.680159 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" event={"ID":"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712","Type":"ContainerDied","Data":"a19582f0f14c36d26f17e984ba3bc058bb307c744cebff99c89ec6af7e826915"} Nov 26 15:39:59 crc kubenswrapper[5010]: I1126 15:39:59.688804 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerID="3d32b7a5b675a20e23bd7e2d5280c6ca1584086b4a1aa150f194f369ba55afb4" exitCode=0 Nov 26 15:39:59 crc kubenswrapper[5010]: I1126 15:39:59.688845 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" event={"ID":"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712","Type":"ContainerDied","Data":"3d32b7a5b675a20e23bd7e2d5280c6ca1584086b4a1aa150f194f369ba55afb4"} Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.019317 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.044928 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-util\") pod \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.045040 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7djl\" (UniqueName: \"kubernetes.io/projected/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-kube-api-access-d7djl\") pod \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.045122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-bundle\") pod \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\" (UID: \"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712\") " Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.050656 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-bundle" (OuterVolumeSpecName: "bundle") pod "38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" (UID: "38aef96a-1fea-4c2c-9d6f-4dac9bb7f712"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.058249 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-kube-api-access-d7djl" (OuterVolumeSpecName: "kube-api-access-d7djl") pod "38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" (UID: "38aef96a-1fea-4c2c-9d6f-4dac9bb7f712"). InnerVolumeSpecName "kube-api-access-d7djl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.062147 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-util" (OuterVolumeSpecName: "util") pod "38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" (UID: "38aef96a-1fea-4c2c-9d6f-4dac9bb7f712"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.147300 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7djl\" (UniqueName: \"kubernetes.io/projected/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-kube-api-access-d7djl\") on node \"crc\" DevicePath \"\"" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.147358 5010 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.147372 5010 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/38aef96a-1fea-4c2c-9d6f-4dac9bb7f712-util\") on node \"crc\" DevicePath \"\"" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.706823 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" event={"ID":"38aef96a-1fea-4c2c-9d6f-4dac9bb7f712","Type":"ContainerDied","Data":"b601d94c4b3e7c79b6093f47032bff563198c592cef7e817509d35e49f6892dd"} Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.706884 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b601d94c4b3e7c79b6093f47032bff563198c592cef7e817509d35e49f6892dd" Nov 26 15:40:01 crc kubenswrapper[5010]: I1126 15:40:01.706925 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.396571 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5"] Nov 26 15:40:10 crc kubenswrapper[5010]: E1126 15:40:10.397433 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="extract-content" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397450 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="extract-content" Nov 26 15:40:10 crc kubenswrapper[5010]: E1126 15:40:10.397462 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="extract-utilities" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397469 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="extract-utilities" Nov 26 15:40:10 crc kubenswrapper[5010]: E1126 15:40:10.397480 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="util" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397488 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="util" Nov 26 15:40:10 crc kubenswrapper[5010]: E1126 15:40:10.397499 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="registry-server" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397507 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="registry-server" Nov 26 15:40:10 crc kubenswrapper[5010]: E1126 15:40:10.397520 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="pull" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397526 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="pull" Nov 26 15:40:10 crc kubenswrapper[5010]: E1126 15:40:10.397548 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="extract" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397555 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="extract" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397669 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="96c01e8f-6078-41e9-be76-022c3aa2b3ae" containerName="registry-server" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.397681 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="38aef96a-1fea-4c2c-9d6f-4dac9bb7f712" containerName="extract" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.398234 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.400412 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.401413 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.401867 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.402014 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-82vqx" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.402690 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.414216 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5"] Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.486380 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-apiservice-cert\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.486495 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvjl8\" (UniqueName: \"kubernetes.io/projected/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-kube-api-access-dvjl8\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.486606 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-webhook-cert\") pod 
\"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.588332 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-apiservice-cert\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.588389 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvjl8\" (UniqueName: \"kubernetes.io/projected/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-kube-api-access-dvjl8\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.588452 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-webhook-cert\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.596021 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-webhook-cert\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.596430 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-apiservice-cert\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.623155 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvjl8\" (UniqueName: \"kubernetes.io/projected/afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6-kube-api-access-dvjl8\") pod \"metallb-operator-controller-manager-7757b8b846-drzn5\" (UID: \"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6\") " pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.624901 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k76mc"] Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.626178 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.633264 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k76mc"] Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.689778 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-utilities\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.689830 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2w6v\" (UniqueName: \"kubernetes.io/projected/6287bb70-a158-4c99-9ad7-d6b4d17950b9-kube-api-access-r2w6v\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.690067 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-catalog-content\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.718666 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.725524 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955"] Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.726246 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.729537 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.729790 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.730851 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-g5pfg" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.747266 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955"] Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.795225 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-utilities\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.795303 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2w6v\" (UniqueName: \"kubernetes.io/projected/6287bb70-a158-4c99-9ad7-d6b4d17950b9-kube-api-access-r2w6v\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.795359 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-catalog-content\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.795397 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/93eee346-e7d2-4097-896b-cc1ffa20d03b-webhook-cert\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.795425 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwgvl\" (UniqueName: \"kubernetes.io/projected/93eee346-e7d2-4097-896b-cc1ffa20d03b-kube-api-access-kwgvl\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.795450 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/93eee346-e7d2-4097-896b-cc1ffa20d03b-apiservice-cert\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.796206 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-catalog-content\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.796357 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-utilities\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.818966 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2w6v\" (UniqueName: \"kubernetes.io/projected/6287bb70-a158-4c99-9ad7-d6b4d17950b9-kube-api-access-r2w6v\") pod \"certified-operators-k76mc\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.897648 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/93eee346-e7d2-4097-896b-cc1ffa20d03b-webhook-cert\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.897698 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwgvl\" (UniqueName: \"kubernetes.io/projected/93eee346-e7d2-4097-896b-cc1ffa20d03b-kube-api-access-kwgvl\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.897749 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/93eee346-e7d2-4097-896b-cc1ffa20d03b-apiservice-cert\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.905343 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/93eee346-e7d2-4097-896b-cc1ffa20d03b-webhook-cert\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.906030 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/93eee346-e7d2-4097-896b-cc1ffa20d03b-apiservice-cert\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.926338 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwgvl\" (UniqueName: \"kubernetes.io/projected/93eee346-e7d2-4097-896b-cc1ffa20d03b-kube-api-access-kwgvl\") pod \"metallb-operator-webhook-server-597c8d6cb6-jk955\" (UID: \"93eee346-e7d2-4097-896b-cc1ffa20d03b\") " 
pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:10 crc kubenswrapper[5010]: I1126 15:40:10.978491 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.051678 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5"] Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.073751 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.317548 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k76mc"] Nov 26 15:40:11 crc kubenswrapper[5010]: W1126 15:40:11.326628 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6287bb70_a158_4c99_9ad7_d6b4d17950b9.slice/crio-58c0b9a7914d29957550018646dec626830f12fdf118528d8c6ce35aae2352bd WatchSource:0}: Error finding container 58c0b9a7914d29957550018646dec626830f12fdf118528d8c6ce35aae2352bd: Status 404 returned error can't find the container with id 58c0b9a7914d29957550018646dec626830f12fdf118528d8c6ce35aae2352bd Nov 26 15:40:11 crc kubenswrapper[5010]: E1126 15:40:11.576227 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6287bb70_a158_4c99_9ad7_d6b4d17950b9.slice/crio-f0dee83481b6123cb448d715e6b390d184ddd1fd9136967a226539931348b885.scope\": RecentStats: unable to find data in memory cache]" Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.592185 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955"] Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.767656 5010 generic.go:334] "Generic (PLEG): container finished" podID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerID="f0dee83481b6123cb448d715e6b390d184ddd1fd9136967a226539931348b885" exitCode=0 Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.767751 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerDied","Data":"f0dee83481b6123cb448d715e6b390d184ddd1fd9136967a226539931348b885"} Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.768082 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerStarted","Data":"58c0b9a7914d29957550018646dec626830f12fdf118528d8c6ce35aae2352bd"} Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.769606 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" event={"ID":"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6","Type":"ContainerStarted","Data":"a0d53abd3c2a058e0babc8c8a8caa420f1b01f4b11f3c444fcd19ed1b795421f"} Nov 26 15:40:11 crc kubenswrapper[5010]: I1126 15:40:11.771857 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" 
event={"ID":"93eee346-e7d2-4097-896b-cc1ffa20d03b","Type":"ContainerStarted","Data":"e7e79ed81a1a6f8441d7168e32f465add0188fc66fd6d9031a450668432511a9"} Nov 26 15:40:13 crc kubenswrapper[5010]: I1126 15:40:13.785649 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerStarted","Data":"672066dbff6a87f93ee69a6b0ccdf0aff3eb60fd2d734be98763c968a41d455a"} Nov 26 15:40:13 crc kubenswrapper[5010]: I1126 15:40:13.787050 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" event={"ID":"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6","Type":"ContainerStarted","Data":"5c955e1edf2b98e60d973c5658642dd0bf11329543a184e4299123ea0328968d"} Nov 26 15:40:13 crc kubenswrapper[5010]: I1126 15:40:13.787396 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:13 crc kubenswrapper[5010]: I1126 15:40:13.831116 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" podStartSLOduration=1.353006299 podStartE2EDuration="3.831094989s" podCreationTimestamp="2025-11-26 15:40:10 +0000 UTC" firstStartedPulling="2025-11-26 15:40:11.086312113 +0000 UTC m=+831.877029261" lastFinishedPulling="2025-11-26 15:40:13.564400803 +0000 UTC m=+834.355117951" observedRunningTime="2025-11-26 15:40:13.828912864 +0000 UTC m=+834.619630012" watchObservedRunningTime="2025-11-26 15:40:13.831094989 +0000 UTC m=+834.621812137" Nov 26 15:40:14 crc kubenswrapper[5010]: I1126 15:40:14.804865 5010 generic.go:334] "Generic (PLEG): container finished" podID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerID="672066dbff6a87f93ee69a6b0ccdf0aff3eb60fd2d734be98763c968a41d455a" exitCode=0 Nov 26 15:40:14 crc kubenswrapper[5010]: I1126 15:40:14.804955 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerDied","Data":"672066dbff6a87f93ee69a6b0ccdf0aff3eb60fd2d734be98763c968a41d455a"} Nov 26 15:40:17 crc kubenswrapper[5010]: I1126 15:40:17.828010 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerStarted","Data":"545ca47e87be68e270c5a1eced2b9cfd8a8317ab66fa9d6efd4c3a76313c797a"} Nov 26 15:40:17 crc kubenswrapper[5010]: I1126 15:40:17.831229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" event={"ID":"93eee346-e7d2-4097-896b-cc1ffa20d03b","Type":"ContainerStarted","Data":"0753a35a3b600027db9087c9662ed8e2cee63d724ef6ee21a15c42b30be2af3c"} Nov 26 15:40:17 crc kubenswrapper[5010]: I1126 15:40:17.832019 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:17 crc kubenswrapper[5010]: I1126 15:40:17.851098 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k76mc" podStartSLOduration=2.894764063 podStartE2EDuration="7.851077436s" podCreationTimestamp="2025-11-26 15:40:10 +0000 UTC" firstStartedPulling="2025-11-26 15:40:11.769372867 +0000 UTC m=+832.560090015" lastFinishedPulling="2025-11-26 
15:40:16.72568624 +0000 UTC m=+837.516403388" observedRunningTime="2025-11-26 15:40:17.845672812 +0000 UTC m=+838.636389970" watchObservedRunningTime="2025-11-26 15:40:17.851077436 +0000 UTC m=+838.641794594" Nov 26 15:40:20 crc kubenswrapper[5010]: I1126 15:40:20.979273 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:20 crc kubenswrapper[5010]: I1126 15:40:20.979736 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:21 crc kubenswrapper[5010]: I1126 15:40:21.030719 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:21 crc kubenswrapper[5010]: I1126 15:40:21.051569 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" podStartSLOduration=5.900940112 podStartE2EDuration="11.051535935s" podCreationTimestamp="2025-11-26 15:40:10 +0000 UTC" firstStartedPulling="2025-11-26 15:40:11.595159345 +0000 UTC m=+832.385876493" lastFinishedPulling="2025-11-26 15:40:16.745755168 +0000 UTC m=+837.536472316" observedRunningTime="2025-11-26 15:40:17.883559502 +0000 UTC m=+838.674276660" watchObservedRunningTime="2025-11-26 15:40:21.051535935 +0000 UTC m=+841.842253113" Nov 26 15:40:21 crc kubenswrapper[5010]: I1126 15:40:21.940582 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:24 crc kubenswrapper[5010]: I1126 15:40:24.464905 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k76mc"] Nov 26 15:40:24 crc kubenswrapper[5010]: I1126 15:40:24.465796 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k76mc" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="registry-server" containerID="cri-o://545ca47e87be68e270c5a1eced2b9cfd8a8317ab66fa9d6efd4c3a76313c797a" gracePeriod=2 Nov 26 15:40:25 crc kubenswrapper[5010]: I1126 15:40:25.906301 5010 generic.go:334] "Generic (PLEG): container finished" podID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerID="545ca47e87be68e270c5a1eced2b9cfd8a8317ab66fa9d6efd4c3a76313c797a" exitCode=0 Nov 26 15:40:25 crc kubenswrapper[5010]: I1126 15:40:25.906376 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerDied","Data":"545ca47e87be68e270c5a1eced2b9cfd8a8317ab66fa9d6efd4c3a76313c797a"} Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.224128 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.333572 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-catalog-content\") pod \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.333841 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-utilities\") pod \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.334849 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2w6v\" (UniqueName: \"kubernetes.io/projected/6287bb70-a158-4c99-9ad7-d6b4d17950b9-kube-api-access-r2w6v\") pod \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\" (UID: \"6287bb70-a158-4c99-9ad7-d6b4d17950b9\") " Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.335178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-utilities" (OuterVolumeSpecName: "utilities") pod "6287bb70-a158-4c99-9ad7-d6b4d17950b9" (UID: "6287bb70-a158-4c99-9ad7-d6b4d17950b9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.335607 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.346902 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6287bb70-a158-4c99-9ad7-d6b4d17950b9-kube-api-access-r2w6v" (OuterVolumeSpecName: "kube-api-access-r2w6v") pod "6287bb70-a158-4c99-9ad7-d6b4d17950b9" (UID: "6287bb70-a158-4c99-9ad7-d6b4d17950b9"). InnerVolumeSpecName "kube-api-access-r2w6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.383054 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6287bb70-a158-4c99-9ad7-d6b4d17950b9" (UID: "6287bb70-a158-4c99-9ad7-d6b4d17950b9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.437206 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2w6v\" (UniqueName: \"kubernetes.io/projected/6287bb70-a158-4c99-9ad7-d6b4d17950b9-kube-api-access-r2w6v\") on node \"crc\" DevicePath \"\"" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.437247 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6287bb70-a158-4c99-9ad7-d6b4d17950b9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.922303 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k76mc" event={"ID":"6287bb70-a158-4c99-9ad7-d6b4d17950b9","Type":"ContainerDied","Data":"58c0b9a7914d29957550018646dec626830f12fdf118528d8c6ce35aae2352bd"} Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.922922 5010 scope.go:117] "RemoveContainer" containerID="545ca47e87be68e270c5a1eced2b9cfd8a8317ab66fa9d6efd4c3a76313c797a" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.922395 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k76mc" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.942845 5010 scope.go:117] "RemoveContainer" containerID="672066dbff6a87f93ee69a6b0ccdf0aff3eb60fd2d734be98763c968a41d455a" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.977469 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k76mc"] Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.980778 5010 scope.go:117] "RemoveContainer" containerID="f0dee83481b6123cb448d715e6b390d184ddd1fd9136967a226539931348b885" Nov 26 15:40:26 crc kubenswrapper[5010]: I1126 15:40:26.986363 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k76mc"] Nov 26 15:40:27 crc kubenswrapper[5010]: I1126 15:40:27.901795 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" path="/var/lib/kubelet/pods/6287bb70-a158-4c99-9ad7-d6b4d17950b9/volumes" Nov 26 15:40:31 crc kubenswrapper[5010]: I1126 15:40:31.079577 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-597c8d6cb6-jk955" Nov 26 15:40:41 crc kubenswrapper[5010]: I1126 15:40:41.423277 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:40:41 crc kubenswrapper[5010]: I1126 15:40:41.423864 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:40:50 crc kubenswrapper[5010]: I1126 15:40:50.721485 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.606277 5010 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["metallb-system/frr-k8s-webhook-server-6998585d5-q4w26"] Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.606687 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="extract-content" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.606743 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="extract-content" Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.606766 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="registry-server" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.606781 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="registry-server" Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.606806 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="extract-utilities" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.606820 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="extract-utilities" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.607007 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6287bb70-a158-4c99-9ad7-d6b4d17950b9" containerName="registry-server" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.607705 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.612962 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-wsf8v" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.613183 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.631089 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-pxvlg"] Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.634422 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.638007 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.638572 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.640178 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-q4w26"] Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672544 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqxcn\" (UniqueName: \"kubernetes.io/projected/abfe8693-75aa-4c43-8c6e-459b37a00cd0-kube-api-access-sqxcn\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672646 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dksvd\" (UniqueName: \"kubernetes.io/projected/6003861e-afe0-4607-a3d4-05f646e2519a-kube-api-access-dksvd\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672705 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-sockets\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672778 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-reloader\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672824 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672859 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6003861e-afe0-4607-a3d4-05f646e2519a-cert\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672884 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-startup\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672906 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: 
\"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-conf\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.672930 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics-certs\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.732751 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-ddjx5"] Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.733975 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.734412 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-vjlsw"] Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.736466 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.738005 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.738293 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.738441 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-696hv" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.738586 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.738766 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.748256 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-vjlsw"] Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774555 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics-certs\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774615 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-metrics-certs\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774644 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8d6e04bf-3113-4c08-b053-acdc47461280-metallb-excludel2\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774687 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-sqxcn\" (UniqueName: \"kubernetes.io/projected/abfe8693-75aa-4c43-8c6e-459b37a00cd0-kube-api-access-sqxcn\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774784 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-metrics-certs\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774808 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dksvd\" (UniqueName: \"kubernetes.io/projected/6003861e-afe0-4607-a3d4-05f646e2519a-kube-api-access-dksvd\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774833 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-sockets\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774851 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774873 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-reloader\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774890 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774906 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9llg\" (UniqueName: \"kubernetes.io/projected/8d6e04bf-3113-4c08-b053-acdc47461280-kube-api-access-b9llg\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774922 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv7wp\" (UniqueName: \"kubernetes.io/projected/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-kube-api-access-bv7wp\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774937 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-cert\") 
pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774962 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6003861e-afe0-4607-a3d4-05f646e2519a-cert\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774977 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-conf\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.774996 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-startup\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.775934 5010 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.776064 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics-certs podName:abfe8693-75aa-4c43-8c6e-459b37a00cd0 nodeName:}" failed. No retries permitted until 2025-11-26 15:40:52.276020847 +0000 UTC m=+873.066738195 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics-certs") pod "frr-k8s-pxvlg" (UID: "abfe8693-75aa-4c43-8c6e-459b37a00cd0") : secret "frr-k8s-certs-secret" not found Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.776882 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-startup\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.777068 5010 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.777130 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6003861e-afe0-4607-a3d4-05f646e2519a-cert podName:6003861e-afe0-4607-a3d4-05f646e2519a nodeName:}" failed. No retries permitted until 2025-11-26 15:40:52.277117865 +0000 UTC m=+873.067835223 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6003861e-afe0-4607-a3d4-05f646e2519a-cert") pod "frr-k8s-webhook-server-6998585d5-q4w26" (UID: "6003861e-afe0-4607-a3d4-05f646e2519a") : secret "frr-k8s-webhook-server-cert" not found Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.777138 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-sockets\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.777335 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-reloader\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.777648 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-frr-conf\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.777787 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.797272 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dksvd\" (UniqueName: \"kubernetes.io/projected/6003861e-afe0-4607-a3d4-05f646e2519a-kube-api-access-dksvd\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.801740 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqxcn\" (UniqueName: \"kubernetes.io/projected/abfe8693-75aa-4c43-8c6e-459b37a00cd0-kube-api-access-sqxcn\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.876158 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8d6e04bf-3113-4c08-b053-acdc47461280-metallb-excludel2\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.876239 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-metrics-certs\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.876274 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc 
kubenswrapper[5010]: I1126 15:40:51.876298 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9llg\" (UniqueName: \"kubernetes.io/projected/8d6e04bf-3113-4c08-b053-acdc47461280-kube-api-access-b9llg\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.876335 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv7wp\" (UniqueName: \"kubernetes.io/projected/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-kube-api-access-bv7wp\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.876354 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-cert\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.876401 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-metrics-certs\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.876704 5010 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.876857 5010 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.876913 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-metrics-certs podName:6a24f7b6-d06d-4f11-a632-d997d92a5c5b nodeName:}" failed. No retries permitted until 2025-11-26 15:40:52.376897245 +0000 UTC m=+873.167614383 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-metrics-certs") pod "controller-6c7b4b5f48-vjlsw" (UID: "6a24f7b6-d06d-4f11-a632-d997d92a5c5b") : secret "controller-certs-secret" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.876937 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-metrics-certs podName:8d6e04bf-3113-4c08-b053-acdc47461280 nodeName:}" failed. No retries permitted until 2025-11-26 15:40:52.376924096 +0000 UTC m=+873.167641244 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-metrics-certs") pod "speaker-ddjx5" (UID: "8d6e04bf-3113-4c08-b053-acdc47461280") : secret "speaker-certs-secret" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.877190 5010 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 15:40:51 crc kubenswrapper[5010]: E1126 15:40:51.877306 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist podName:8d6e04bf-3113-4c08-b053-acdc47461280 nodeName:}" failed. No retries permitted until 2025-11-26 15:40:52.377274225 +0000 UTC m=+873.167991373 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist") pod "speaker-ddjx5" (UID: "8d6e04bf-3113-4c08-b053-acdc47461280") : secret "metallb-memberlist" not found Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.878049 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8d6e04bf-3113-4c08-b053-acdc47461280-metallb-excludel2\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.880383 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.894272 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-cert\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.898081 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv7wp\" (UniqueName: \"kubernetes.io/projected/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-kube-api-access-bv7wp\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:51 crc kubenswrapper[5010]: I1126 15:40:51.913392 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9llg\" (UniqueName: \"kubernetes.io/projected/8d6e04bf-3113-4c08-b053-acdc47461280-kube-api-access-b9llg\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.283899 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6003861e-afe0-4607-a3d4-05f646e2519a-cert\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.284270 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics-certs\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.290789 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6003861e-afe0-4607-a3d4-05f646e2519a-cert\") pod \"frr-k8s-webhook-server-6998585d5-q4w26\" (UID: \"6003861e-afe0-4607-a3d4-05f646e2519a\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.294066 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/abfe8693-75aa-4c43-8c6e-459b37a00cd0-metrics-certs\") pod \"frr-k8s-pxvlg\" (UID: \"abfe8693-75aa-4c43-8c6e-459b37a00cd0\") " pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.387026 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.387140 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-metrics-certs\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.387236 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-metrics-certs\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:52 crc kubenswrapper[5010]: E1126 15:40:52.387320 5010 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 26 15:40:52 crc kubenswrapper[5010]: E1126 15:40:52.387435 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist podName:8d6e04bf-3113-4c08-b053-acdc47461280 nodeName:}" failed. No retries permitted until 2025-11-26 15:40:53.387406717 +0000 UTC m=+874.178123896 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist") pod "speaker-ddjx5" (UID: "8d6e04bf-3113-4c08-b053-acdc47461280") : secret "metallb-memberlist" not found Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.391177 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-metrics-certs\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.392673 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6a24f7b6-d06d-4f11-a632-d997d92a5c5b-metrics-certs\") pod \"controller-6c7b4b5f48-vjlsw\" (UID: \"6a24f7b6-d06d-4f11-a632-d997d92a5c5b\") " pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.535386 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.554537 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.660971 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.839527 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-q4w26"] Nov 26 15:40:52 crc kubenswrapper[5010]: W1126 15:40:52.851983 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6003861e_afe0_4607_a3d4_05f646e2519a.slice/crio-e74e393f2acab226d48b1440a3295278a76e3a73c09d766f2743ecdb2db27c05 WatchSource:0}: Error finding container e74e393f2acab226d48b1440a3295278a76e3a73c09d766f2743ecdb2db27c05: Status 404 returned error can't find the container with id e74e393f2acab226d48b1440a3295278a76e3a73c09d766f2743ecdb2db27c05 Nov 26 15:40:52 crc kubenswrapper[5010]: I1126 15:40:52.935608 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-vjlsw"] Nov 26 15:40:52 crc kubenswrapper[5010]: W1126 15:40:52.945196 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a24f7b6_d06d_4f11_a632_d997d92a5c5b.slice/crio-8e6636be58678c1a1083624c31e8413adeaeb61de3a2cd1fe7b5c00b0d9d554a WatchSource:0}: Error finding container 8e6636be58678c1a1083624c31e8413adeaeb61de3a2cd1fe7b5c00b0d9d554a: Status 404 returned error can't find the container with id 8e6636be58678c1a1083624c31e8413adeaeb61de3a2cd1fe7b5c00b0d9d554a Nov 26 15:40:53 crc kubenswrapper[5010]: I1126 15:40:53.120500 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" event={"ID":"6003861e-afe0-4607-a3d4-05f646e2519a","Type":"ContainerStarted","Data":"e74e393f2acab226d48b1440a3295278a76e3a73c09d766f2743ecdb2db27c05"} Nov 26 15:40:53 crc kubenswrapper[5010]: I1126 15:40:53.123241 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vjlsw" event={"ID":"6a24f7b6-d06d-4f11-a632-d997d92a5c5b","Type":"ContainerStarted","Data":"8e6636be58678c1a1083624c31e8413adeaeb61de3a2cd1fe7b5c00b0d9d554a"} Nov 26 15:40:53 crc kubenswrapper[5010]: I1126 15:40:53.124947 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"35ab1aba8f553e98a770ae4d46123b828b353cf6c59768075d0e44bd13b60d8c"} Nov 26 15:40:53 crc kubenswrapper[5010]: I1126 15:40:53.405276 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:53 crc kubenswrapper[5010]: I1126 15:40:53.412013 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8d6e04bf-3113-4c08-b053-acdc47461280-memberlist\") pod \"speaker-ddjx5\" (UID: \"8d6e04bf-3113-4c08-b053-acdc47461280\") " pod="metallb-system/speaker-ddjx5" Nov 26 15:40:53 crc 
kubenswrapper[5010]: I1126 15:40:53.553230 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-ddjx5" Nov 26 15:40:53 crc kubenswrapper[5010]: W1126 15:40:53.588179 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d6e04bf_3113_4c08_b053_acdc47461280.slice/crio-237d86bea1d2378ee47f0d9674c9206fb154f8233116f4622a8e589df42922d6 WatchSource:0}: Error finding container 237d86bea1d2378ee47f0d9674c9206fb154f8233116f4622a8e589df42922d6: Status 404 returned error can't find the container with id 237d86bea1d2378ee47f0d9674c9206fb154f8233116f4622a8e589df42922d6 Nov 26 15:40:54 crc kubenswrapper[5010]: I1126 15:40:54.135274 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vjlsw" event={"ID":"6a24f7b6-d06d-4f11-a632-d997d92a5c5b","Type":"ContainerStarted","Data":"22db14e60541bab293b7eca07142e143535721a813aff10add31ab8e4bb70732"} Nov 26 15:40:54 crc kubenswrapper[5010]: I1126 15:40:54.135346 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-vjlsw" event={"ID":"6a24f7b6-d06d-4f11-a632-d997d92a5c5b","Type":"ContainerStarted","Data":"b05f980968af4c728df314aa30c6b76a76bcc2a92f2688fcc72d13dd9c5bda4d"} Nov 26 15:40:54 crc kubenswrapper[5010]: I1126 15:40:54.135437 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:40:54 crc kubenswrapper[5010]: I1126 15:40:54.136585 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ddjx5" event={"ID":"8d6e04bf-3113-4c08-b053-acdc47461280","Type":"ContainerStarted","Data":"5c1baa5da11558b76178ace06e090d393857f3fb8e6adc514c8656864cbdac12"} Nov 26 15:40:54 crc kubenswrapper[5010]: I1126 15:40:54.136618 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ddjx5" event={"ID":"8d6e04bf-3113-4c08-b053-acdc47461280","Type":"ContainerStarted","Data":"237d86bea1d2378ee47f0d9674c9206fb154f8233116f4622a8e589df42922d6"} Nov 26 15:40:54 crc kubenswrapper[5010]: I1126 15:40:54.159831 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-vjlsw" podStartSLOduration=3.159805679 podStartE2EDuration="3.159805679s" podCreationTimestamp="2025-11-26 15:40:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:40:54.158739103 +0000 UTC m=+874.949456271" watchObservedRunningTime="2025-11-26 15:40:54.159805679 +0000 UTC m=+874.950522827" Nov 26 15:40:55 crc kubenswrapper[5010]: I1126 15:40:55.158993 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-ddjx5" event={"ID":"8d6e04bf-3113-4c08-b053-acdc47461280","Type":"ContainerStarted","Data":"c09a07ba168d79ac2ddd1ea7e8431af2ec063a350ffd26bea758c88897be93ce"} Nov 26 15:40:55 crc kubenswrapper[5010]: I1126 15:40:55.159578 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-ddjx5" Nov 26 15:40:55 crc kubenswrapper[5010]: I1126 15:40:55.214802 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-ddjx5" podStartSLOduration=4.214783473 podStartE2EDuration="4.214783473s" podCreationTimestamp="2025-11-26 15:40:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-26 15:40:55.206662391 +0000 UTC m=+875.997379539" watchObservedRunningTime="2025-11-26 15:40:55.214783473 +0000 UTC m=+876.005500621" Nov 26 15:41:02 crc kubenswrapper[5010]: I1126 15:41:02.227144 5010 generic.go:334] "Generic (PLEG): container finished" podID="abfe8693-75aa-4c43-8c6e-459b37a00cd0" containerID="09dd0a857f7efa9cf5cff3bb6727a5448818a8c7f1240f7c87b1718d2fdfd67d" exitCode=0 Nov 26 15:41:02 crc kubenswrapper[5010]: I1126 15:41:02.228098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerDied","Data":"09dd0a857f7efa9cf5cff3bb6727a5448818a8c7f1240f7c87b1718d2fdfd67d"} Nov 26 15:41:02 crc kubenswrapper[5010]: I1126 15:41:02.230332 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" event={"ID":"6003861e-afe0-4607-a3d4-05f646e2519a","Type":"ContainerStarted","Data":"b9ee315df65c1efbd95c566f36c46fd238d680825a26c05ee258d39cb35174af"} Nov 26 15:41:02 crc kubenswrapper[5010]: I1126 15:41:02.231255 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:41:03 crc kubenswrapper[5010]: I1126 15:41:03.239366 5010 generic.go:334] "Generic (PLEG): container finished" podID="abfe8693-75aa-4c43-8c6e-459b37a00cd0" containerID="8d03c0936280c20cacb3ed9022c5e378a6f72a445e7e51c893d7b1a441ff3126" exitCode=0 Nov 26 15:41:03 crc kubenswrapper[5010]: I1126 15:41:03.239501 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerDied","Data":"8d03c0936280c20cacb3ed9022c5e378a6f72a445e7e51c893d7b1a441ff3126"} Nov 26 15:41:03 crc kubenswrapper[5010]: I1126 15:41:03.275521 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" podStartSLOduration=3.359128604 podStartE2EDuration="12.275503279s" podCreationTimestamp="2025-11-26 15:40:51 +0000 UTC" firstStartedPulling="2025-11-26 15:40:52.857304997 +0000 UTC m=+873.648022145" lastFinishedPulling="2025-11-26 15:41:01.773679622 +0000 UTC m=+882.564396820" observedRunningTime="2025-11-26 15:41:02.293241561 +0000 UTC m=+883.083958709" watchObservedRunningTime="2025-11-26 15:41:03.275503279 +0000 UTC m=+884.066220437" Nov 26 15:41:03 crc kubenswrapper[5010]: I1126 15:41:03.560290 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-ddjx5" Nov 26 15:41:04 crc kubenswrapper[5010]: I1126 15:41:04.257363 5010 generic.go:334] "Generic (PLEG): container finished" podID="abfe8693-75aa-4c43-8c6e-459b37a00cd0" containerID="0e75164a1ac9e36dc6a52b5d77a9c071d6dc54d84361a2b9f744c78964af076b" exitCode=0 Nov 26 15:41:04 crc kubenswrapper[5010]: I1126 15:41:04.257502 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerDied","Data":"0e75164a1ac9e36dc6a52b5d77a9c071d6dc54d84361a2b9f744c78964af076b"} Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.178647 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42"] Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.180940 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.183949 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.199665 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42"] Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.207081 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwkhd\" (UniqueName: \"kubernetes.io/projected/5045e2fe-8fec-4331-885c-77b33cd99537-kube-api-access-vwkhd\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.207138 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.207287 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.272002 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"83070d2a3c000c97c4f73f94de686950ae61882a1ece6a070cc4fa8c7389333b"} Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.272046 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"a69f511aab0d7fb22a04245e1a6ebcc88502b9505d05fab37bcfda9b53bc985c"} Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.272056 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"5b066cd64046022f3b9003648a7df6f8f7b81ab0adf67d6b8f105aa638727492"} Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.272067 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"5e2f8deff422c8c72f0bda2801fd7b0dda1c4487e205101a1101415c54254b59"} Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.309155 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " 
pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.309375 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwkhd\" (UniqueName: \"kubernetes.io/projected/5045e2fe-8fec-4331-885c-77b33cd99537-kube-api-access-vwkhd\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.309460 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.309761 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.310317 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.336486 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwkhd\" (UniqueName: \"kubernetes.io/projected/5045e2fe-8fec-4331-885c-77b33cd99537-kube-api-access-vwkhd\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:05 crc kubenswrapper[5010]: I1126 15:41:05.503281 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:06 crc kubenswrapper[5010]: I1126 15:41:06.043649 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42"] Nov 26 15:41:06 crc kubenswrapper[5010]: W1126 15:41:06.054385 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5045e2fe_8fec_4331_885c_77b33cd99537.slice/crio-919dcdb8db755951d2012eddad41706d2ba06592ae1da039925e5520696ba472 WatchSource:0}: Error finding container 919dcdb8db755951d2012eddad41706d2ba06592ae1da039925e5520696ba472: Status 404 returned error can't find the container with id 919dcdb8db755951d2012eddad41706d2ba06592ae1da039925e5520696ba472 Nov 26 15:41:06 crc kubenswrapper[5010]: I1126 15:41:06.289359 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"c7cb1cf5e2a0a8c7ddf6953c83ed7027ef9aed7ff9d101dcffc57099bda8c526"} Nov 26 15:41:06 crc kubenswrapper[5010]: I1126 15:41:06.289824 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-pxvlg" event={"ID":"abfe8693-75aa-4c43-8c6e-459b37a00cd0","Type":"ContainerStarted","Data":"1d2de43c8929fce62a2c8a15011b205aa513871a8e89a5e3339513bb05da50df"} Nov 26 15:41:06 crc kubenswrapper[5010]: I1126 15:41:06.290911 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" event={"ID":"5045e2fe-8fec-4331-885c-77b33cd99537","Type":"ContainerStarted","Data":"4177ba3c31d3ab900f4ccd76f4b2ea116c46a96a404f372b06c9169dd278db5e"} Nov 26 15:41:06 crc kubenswrapper[5010]: I1126 15:41:06.290947 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" event={"ID":"5045e2fe-8fec-4331-885c-77b33cd99537","Type":"ContainerStarted","Data":"919dcdb8db755951d2012eddad41706d2ba06592ae1da039925e5520696ba472"} Nov 26 15:41:06 crc kubenswrapper[5010]: I1126 15:41:06.327985 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-pxvlg" podStartSLOduration=6.433937935 podStartE2EDuration="15.327919042s" podCreationTimestamp="2025-11-26 15:40:51 +0000 UTC" firstStartedPulling="2025-11-26 15:40:52.854002544 +0000 UTC m=+873.644719682" lastFinishedPulling="2025-11-26 15:41:01.747983611 +0000 UTC m=+882.538700789" observedRunningTime="2025-11-26 15:41:06.324130138 +0000 UTC m=+887.114847296" watchObservedRunningTime="2025-11-26 15:41:06.327919042 +0000 UTC m=+887.118636190" Nov 26 15:41:07 crc kubenswrapper[5010]: I1126 15:41:07.301072 5010 generic.go:334] "Generic (PLEG): container finished" podID="5045e2fe-8fec-4331-885c-77b33cd99537" containerID="4177ba3c31d3ab900f4ccd76f4b2ea116c46a96a404f372b06c9169dd278db5e" exitCode=0 Nov 26 15:41:07 crc kubenswrapper[5010]: I1126 15:41:07.301203 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" event={"ID":"5045e2fe-8fec-4331-885c-77b33cd99537","Type":"ContainerDied","Data":"4177ba3c31d3ab900f4ccd76f4b2ea116c46a96a404f372b06c9169dd278db5e"} Nov 26 15:41:07 crc kubenswrapper[5010]: I1126 15:41:07.302045 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:41:07 crc kubenswrapper[5010]: I1126 15:41:07.556312 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:41:07 crc kubenswrapper[5010]: I1126 15:41:07.604034 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:41:11 crc kubenswrapper[5010]: I1126 15:41:11.362422 5010 generic.go:334] "Generic (PLEG): container finished" podID="5045e2fe-8fec-4331-885c-77b33cd99537" containerID="b8207f96f934399fdb80593b721da0e3993e22f54e04e1b334e134d5eb0cd270" exitCode=0 Nov 26 15:41:11 crc kubenswrapper[5010]: I1126 15:41:11.362928 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" event={"ID":"5045e2fe-8fec-4331-885c-77b33cd99537","Type":"ContainerDied","Data":"b8207f96f934399fdb80593b721da0e3993e22f54e04e1b334e134d5eb0cd270"} Nov 26 15:41:11 crc kubenswrapper[5010]: I1126 15:41:11.423403 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:41:11 crc kubenswrapper[5010]: I1126 15:41:11.423563 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:41:12 crc kubenswrapper[5010]: I1126 15:41:12.374552 5010 generic.go:334] "Generic (PLEG): container finished" podID="5045e2fe-8fec-4331-885c-77b33cd99537" containerID="a51ed16f557c4f8395003e7e348bf892e27aec82c6b0556bfd130a9faabc6a31" exitCode=0 Nov 26 15:41:12 crc kubenswrapper[5010]: I1126 15:41:12.374637 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" event={"ID":"5045e2fe-8fec-4331-885c-77b33cd99537","Type":"ContainerDied","Data":"a51ed16f557c4f8395003e7e348bf892e27aec82c6b0556bfd130a9faabc6a31"} Nov 26 15:41:12 crc kubenswrapper[5010]: I1126 15:41:12.545217 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-q4w26" Nov 26 15:41:12 crc kubenswrapper[5010]: I1126 15:41:12.666555 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-vjlsw" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.699292 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.758205 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-util\") pod \"5045e2fe-8fec-4331-885c-77b33cd99537\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.758320 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vwkhd\" (UniqueName: \"kubernetes.io/projected/5045e2fe-8fec-4331-885c-77b33cd99537-kube-api-access-vwkhd\") pod \"5045e2fe-8fec-4331-885c-77b33cd99537\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.758410 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-bundle\") pod \"5045e2fe-8fec-4331-885c-77b33cd99537\" (UID: \"5045e2fe-8fec-4331-885c-77b33cd99537\") " Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.759384 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-bundle" (OuterVolumeSpecName: "bundle") pod "5045e2fe-8fec-4331-885c-77b33cd99537" (UID: "5045e2fe-8fec-4331-885c-77b33cd99537"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.768462 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5045e2fe-8fec-4331-885c-77b33cd99537-kube-api-access-vwkhd" (OuterVolumeSpecName: "kube-api-access-vwkhd") pod "5045e2fe-8fec-4331-885c-77b33cd99537" (UID: "5045e2fe-8fec-4331-885c-77b33cd99537"). InnerVolumeSpecName "kube-api-access-vwkhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.768794 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-util" (OuterVolumeSpecName: "util") pod "5045e2fe-8fec-4331-885c-77b33cd99537" (UID: "5045e2fe-8fec-4331-885c-77b33cd99537"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.860653 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vwkhd\" (UniqueName: \"kubernetes.io/projected/5045e2fe-8fec-4331-885c-77b33cd99537-kube-api-access-vwkhd\") on node \"crc\" DevicePath \"\"" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.860800 5010 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:41:13 crc kubenswrapper[5010]: I1126 15:41:13.860829 5010 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5045e2fe-8fec-4331-885c-77b33cd99537-util\") on node \"crc\" DevicePath \"\"" Nov 26 15:41:14 crc kubenswrapper[5010]: I1126 15:41:14.393676 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" event={"ID":"5045e2fe-8fec-4331-885c-77b33cd99537","Type":"ContainerDied","Data":"919dcdb8db755951d2012eddad41706d2ba06592ae1da039925e5520696ba472"} Nov 26 15:41:14 crc kubenswrapper[5010]: I1126 15:41:14.393760 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="919dcdb8db755951d2012eddad41706d2ba06592ae1da039925e5520696ba472" Nov 26 15:41:14 crc kubenswrapper[5010]: I1126 15:41:14.393869 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.616207 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2"] Nov 26 15:41:18 crc kubenswrapper[5010]: E1126 15:41:18.616878 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="util" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.616896 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="util" Nov 26 15:41:18 crc kubenswrapper[5010]: E1126 15:41:18.616915 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="extract" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.616922 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="extract" Nov 26 15:41:18 crc kubenswrapper[5010]: E1126 15:41:18.616937 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="pull" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.616946 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="pull" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.617088 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5045e2fe-8fec-4331-885c-77b33cd99537" containerName="extract" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.617636 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.622532 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-s2d9x" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.624173 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.625300 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.631193 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggvx9\" (UniqueName: \"kubernetes.io/projected/756a03ae-8964-4859-ab0b-52d1a9c164e2-kube-api-access-ggvx9\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jgkc2\" (UID: \"756a03ae-8964-4859-ab0b-52d1a9c164e2\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.631230 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/756a03ae-8964-4859-ab0b-52d1a9c164e2-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jgkc2\" (UID: \"756a03ae-8964-4859-ab0b-52d1a9c164e2\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.657587 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2"] Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.733056 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggvx9\" (UniqueName: \"kubernetes.io/projected/756a03ae-8964-4859-ab0b-52d1a9c164e2-kube-api-access-ggvx9\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jgkc2\" (UID: \"756a03ae-8964-4859-ab0b-52d1a9c164e2\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.733574 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/756a03ae-8964-4859-ab0b-52d1a9c164e2-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jgkc2\" (UID: \"756a03ae-8964-4859-ab0b-52d1a9c164e2\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.734192 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/756a03ae-8964-4859-ab0b-52d1a9c164e2-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jgkc2\" (UID: \"756a03ae-8964-4859-ab0b-52d1a9c164e2\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.761455 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggvx9\" (UniqueName: \"kubernetes.io/projected/756a03ae-8964-4859-ab0b-52d1a9c164e2-kube-api-access-ggvx9\") pod \"cert-manager-operator-controller-manager-64cf6dff88-jgkc2\" (UID: \"756a03ae-8964-4859-ab0b-52d1a9c164e2\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:18 crc kubenswrapper[5010]: I1126 15:41:18.936770 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" Nov 26 15:41:19 crc kubenswrapper[5010]: I1126 15:41:19.486496 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2"] Nov 26 15:41:19 crc kubenswrapper[5010]: W1126 15:41:19.498659 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod756a03ae_8964_4859_ab0b_52d1a9c164e2.slice/crio-ace89f63f9dfe98dac01742063df9359ca769b500f89aa63d4a39b68b2f26c22 WatchSource:0}: Error finding container ace89f63f9dfe98dac01742063df9359ca769b500f89aa63d4a39b68b2f26c22: Status 404 returned error can't find the container with id ace89f63f9dfe98dac01742063df9359ca769b500f89aa63d4a39b68b2f26c22 Nov 26 15:41:20 crc kubenswrapper[5010]: I1126 15:41:20.431186 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" event={"ID":"756a03ae-8964-4859-ab0b-52d1a9c164e2","Type":"ContainerStarted","Data":"ace89f63f9dfe98dac01742063df9359ca769b500f89aa63d4a39b68b2f26c22"} Nov 26 15:41:22 crc kubenswrapper[5010]: I1126 15:41:22.558221 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-pxvlg" Nov 26 15:41:28 crc kubenswrapper[5010]: I1126 15:41:28.510865 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" event={"ID":"756a03ae-8964-4859-ab0b-52d1a9c164e2","Type":"ContainerStarted","Data":"7c1c0d09c81d7d9a82bb18b15164c88a074be1586e7412e4481e213cc5463542"} Nov 26 15:41:28 crc kubenswrapper[5010]: I1126 15:41:28.547273 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-jgkc2" podStartSLOduration=1.907732883 podStartE2EDuration="10.547254837s" podCreationTimestamp="2025-11-26 15:41:18 +0000 UTC" firstStartedPulling="2025-11-26 15:41:19.502941119 +0000 UTC m=+900.293658267" lastFinishedPulling="2025-11-26 15:41:28.142463073 +0000 UTC m=+908.933180221" observedRunningTime="2025-11-26 15:41:28.54337709 +0000 UTC m=+909.334094238" watchObservedRunningTime="2025-11-26 15:41:28.547254837 +0000 UTC m=+909.337971985" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.457772 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-r7qmc"] Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.459392 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.461623 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.461623 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-mglbg" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.461952 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.470317 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-r7qmc"] Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.569938 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8crv7\" (UniqueName: \"kubernetes.io/projected/53dc96f8-9b73-42e8-ada1-7bf243575c6b-kube-api-access-8crv7\") pod \"cert-manager-webhook-f4fb5df64-r7qmc\" (UID: \"53dc96f8-9b73-42e8-ada1-7bf243575c6b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.570001 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/53dc96f8-9b73-42e8-ada1-7bf243575c6b-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-r7qmc\" (UID: \"53dc96f8-9b73-42e8-ada1-7bf243575c6b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.671089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/53dc96f8-9b73-42e8-ada1-7bf243575c6b-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-r7qmc\" (UID: \"53dc96f8-9b73-42e8-ada1-7bf243575c6b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.671245 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8crv7\" (UniqueName: \"kubernetes.io/projected/53dc96f8-9b73-42e8-ada1-7bf243575c6b-kube-api-access-8crv7\") pod \"cert-manager-webhook-f4fb5df64-r7qmc\" (UID: \"53dc96f8-9b73-42e8-ada1-7bf243575c6b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.691195 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/53dc96f8-9b73-42e8-ada1-7bf243575c6b-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-r7qmc\" (UID: \"53dc96f8-9b73-42e8-ada1-7bf243575c6b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.697018 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8crv7\" (UniqueName: \"kubernetes.io/projected/53dc96f8-9b73-42e8-ada1-7bf243575c6b-kube-api-access-8crv7\") pod \"cert-manager-webhook-f4fb5df64-r7qmc\" (UID: \"53dc96f8-9b73-42e8-ada1-7bf243575c6b\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:33 crc kubenswrapper[5010]: I1126 15:41:33.783495 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:34 crc kubenswrapper[5010]: I1126 15:41:34.276238 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-r7qmc"] Nov 26 15:41:34 crc kubenswrapper[5010]: I1126 15:41:34.554201 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" event={"ID":"53dc96f8-9b73-42e8-ada1-7bf243575c6b","Type":"ContainerStarted","Data":"385d3f7212fb07a72ca0254eff174fb5fbce7f4caeab3b676c6c16072306dc7a"} Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.218324 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-rkkql"] Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.219921 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.226104 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-kdxh4" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.236011 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-rkkql"] Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.322315 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35029056-31b7-46c8-9ac0-93d2c36ae95f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-rkkql\" (UID: \"35029056-31b7-46c8-9ac0-93d2c36ae95f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.322410 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvswg\" (UniqueName: \"kubernetes.io/projected/35029056-31b7-46c8-9ac0-93d2c36ae95f-kube-api-access-xvswg\") pod \"cert-manager-cainjector-855d9ccff4-rkkql\" (UID: \"35029056-31b7-46c8-9ac0-93d2c36ae95f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.423649 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvswg\" (UniqueName: \"kubernetes.io/projected/35029056-31b7-46c8-9ac0-93d2c36ae95f-kube-api-access-xvswg\") pod \"cert-manager-cainjector-855d9ccff4-rkkql\" (UID: \"35029056-31b7-46c8-9ac0-93d2c36ae95f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.423792 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35029056-31b7-46c8-9ac0-93d2c36ae95f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-rkkql\" (UID: \"35029056-31b7-46c8-9ac0-93d2c36ae95f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.447898 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35029056-31b7-46c8-9ac0-93d2c36ae95f-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-rkkql\" (UID: \"35029056-31b7-46c8-9ac0-93d2c36ae95f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.451586 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvswg\" (UniqueName: \"kubernetes.io/projected/35029056-31b7-46c8-9ac0-93d2c36ae95f-kube-api-access-xvswg\") pod \"cert-manager-cainjector-855d9ccff4-rkkql\" (UID: \"35029056-31b7-46c8-9ac0-93d2c36ae95f\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:36 crc kubenswrapper[5010]: I1126 15:41:36.561250 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" Nov 26 15:41:37 crc kubenswrapper[5010]: I1126 15:41:37.009569 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-rkkql"] Nov 26 15:41:37 crc kubenswrapper[5010]: I1126 15:41:37.582383 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" event={"ID":"35029056-31b7-46c8-9ac0-93d2c36ae95f","Type":"ContainerStarted","Data":"964839fee3ba49a2b3aff442fdb905ef2f25f29170a038b7ab3c8579eaed9152"} Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.423322 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.423954 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.424017 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.424868 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"866a4d79b3a741e66d3af7f04184bb9e206692b2113aca2fc0a5c00bbc84fa10"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.424954 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://866a4d79b3a741e66d3af7f04184bb9e206692b2113aca2fc0a5c00bbc84fa10" gracePeriod=600 Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.619632 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="866a4d79b3a741e66d3af7f04184bb9e206692b2113aca2fc0a5c00bbc84fa10" exitCode=0 Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.619700 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"866a4d79b3a741e66d3af7f04184bb9e206692b2113aca2fc0a5c00bbc84fa10"} Nov 26 15:41:41 crc kubenswrapper[5010]: I1126 15:41:41.619784 5010 scope.go:117] "RemoveContainer" 
containerID="07de4390fc3c8495bcdc1f46830e2b986f1ed25110c72eb2d6d31304d8ef46ee" Nov 26 15:41:42 crc kubenswrapper[5010]: I1126 15:41:42.630243 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"59f84423fa85afba142264d8718184fcb64f0d905168b9c5b86ca7f3cd897062"} Nov 26 15:41:43 crc kubenswrapper[5010]: I1126 15:41:43.641137 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" event={"ID":"35029056-31b7-46c8-9ac0-93d2c36ae95f","Type":"ContainerStarted","Data":"f36a7314731c1b61594d50806db0e6e68d76a86d122f396913f3248f6e0e534f"} Nov 26 15:41:43 crc kubenswrapper[5010]: I1126 15:41:43.644354 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" event={"ID":"53dc96f8-9b73-42e8-ada1-7bf243575c6b","Type":"ContainerStarted","Data":"0851bbd55068f8db15933132ebba6031ac03165e08396ee943eca4d3a4c45a25"} Nov 26 15:41:43 crc kubenswrapper[5010]: I1126 15:41:43.644525 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:43 crc kubenswrapper[5010]: I1126 15:41:43.663473 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-rkkql" podStartSLOduration=2.253235032 podStartE2EDuration="7.663448978s" podCreationTimestamp="2025-11-26 15:41:36 +0000 UTC" firstStartedPulling="2025-11-26 15:41:37.028616944 +0000 UTC m=+917.819334092" lastFinishedPulling="2025-11-26 15:41:42.43883088 +0000 UTC m=+923.229548038" observedRunningTime="2025-11-26 15:41:43.657558741 +0000 UTC m=+924.448275889" watchObservedRunningTime="2025-11-26 15:41:43.663448978 +0000 UTC m=+924.454166126" Nov 26 15:41:43 crc kubenswrapper[5010]: I1126 15:41:43.680640 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" podStartSLOduration=2.494503141 podStartE2EDuration="10.680599766s" podCreationTimestamp="2025-11-26 15:41:33 +0000 UTC" firstStartedPulling="2025-11-26 15:41:34.287892452 +0000 UTC m=+915.078609640" lastFinishedPulling="2025-11-26 15:41:42.473989117 +0000 UTC m=+923.264706265" observedRunningTime="2025-11-26 15:41:43.674837492 +0000 UTC m=+924.465554670" watchObservedRunningTime="2025-11-26 15:41:43.680599766 +0000 UTC m=+924.471316954" Nov 26 15:41:48 crc kubenswrapper[5010]: I1126 15:41:48.787020 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-r7qmc" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.622763 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6pqsn"] Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.624055 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.628853 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-8wrrj" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.636411 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6pqsn"] Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.654879 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2b95\" (UniqueName: \"kubernetes.io/projected/ad59753d-a191-4ef5-9945-d1126e81bb8e-kube-api-access-f2b95\") pod \"cert-manager-86cb77c54b-6pqsn\" (UID: \"ad59753d-a191-4ef5-9945-d1126e81bb8e\") " pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.655366 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ad59753d-a191-4ef5-9945-d1126e81bb8e-bound-sa-token\") pod \"cert-manager-86cb77c54b-6pqsn\" (UID: \"ad59753d-a191-4ef5-9945-d1126e81bb8e\") " pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.756969 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2b95\" (UniqueName: \"kubernetes.io/projected/ad59753d-a191-4ef5-9945-d1126e81bb8e-kube-api-access-f2b95\") pod \"cert-manager-86cb77c54b-6pqsn\" (UID: \"ad59753d-a191-4ef5-9945-d1126e81bb8e\") " pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.757059 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ad59753d-a191-4ef5-9945-d1126e81bb8e-bound-sa-token\") pod \"cert-manager-86cb77c54b-6pqsn\" (UID: \"ad59753d-a191-4ef5-9945-d1126e81bb8e\") " pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.778916 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2b95\" (UniqueName: \"kubernetes.io/projected/ad59753d-a191-4ef5-9945-d1126e81bb8e-kube-api-access-f2b95\") pod \"cert-manager-86cb77c54b-6pqsn\" (UID: \"ad59753d-a191-4ef5-9945-d1126e81bb8e\") " pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.779167 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ad59753d-a191-4ef5-9945-d1126e81bb8e-bound-sa-token\") pod \"cert-manager-86cb77c54b-6pqsn\" (UID: \"ad59753d-a191-4ef5-9945-d1126e81bb8e\") " pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:52 crc kubenswrapper[5010]: I1126 15:41:52.987533 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 15:41:53 crc kubenswrapper[5010]: I1126 15:41:53.289206 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6pqsn"] Nov 26 15:41:53 crc kubenswrapper[5010]: I1126 15:41:53.714734 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" event={"ID":"ad59753d-a191-4ef5-9945-d1126e81bb8e","Type":"ContainerStarted","Data":"457a9a7838ee1d7e0719d0351a12a74ee98c606335404e5e99269c340fe4c210"} Nov 26 15:41:53 crc kubenswrapper[5010]: I1126 15:41:53.715244 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" event={"ID":"ad59753d-a191-4ef5-9945-d1126e81bb8e","Type":"ContainerStarted","Data":"ecdd48fae5f222883eb5e6a143a5e98dab4cccc93df425ec32fb03c36cde007f"} Nov 26 15:41:53 crc kubenswrapper[5010]: I1126 15:41:53.745254 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" podStartSLOduration=1.745228574 podStartE2EDuration="1.745228574s" podCreationTimestamp="2025-11-26 15:41:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:41:53.738087305 +0000 UTC m=+934.528804483" watchObservedRunningTime="2025-11-26 15:41:53.745228574 +0000 UTC m=+934.535945732" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.204528 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-56fnh"] Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.206378 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.208540 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.210279 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-vvd9k" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.210595 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.226335 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-56fnh"] Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.332795 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8rct\" (UniqueName: \"kubernetes.io/projected/5a87f5af-beea-4084-8351-4d333378baf8-kube-api-access-f8rct\") pod \"openstack-operator-index-56fnh\" (UID: \"5a87f5af-beea-4084-8351-4d333378baf8\") " pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.434554 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8rct\" (UniqueName: \"kubernetes.io/projected/5a87f5af-beea-4084-8351-4d333378baf8-kube-api-access-f8rct\") pod \"openstack-operator-index-56fnh\" (UID: \"5a87f5af-beea-4084-8351-4d333378baf8\") " pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.459297 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-f8rct\" (UniqueName: \"kubernetes.io/projected/5a87f5af-beea-4084-8351-4d333378baf8-kube-api-access-f8rct\") pod \"openstack-operator-index-56fnh\" (UID: \"5a87f5af-beea-4084-8351-4d333378baf8\") " pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.541293 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.793022 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-56fnh"] Nov 26 15:42:06 crc kubenswrapper[5010]: I1126 15:42:06.819950 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-56fnh" event={"ID":"5a87f5af-beea-4084-8351-4d333378baf8","Type":"ContainerStarted","Data":"11e761c8abbaffd46dc99f60a6acb10d4fc92bd85606f121e9112ae1ff0670cf"} Nov 26 15:42:14 crc kubenswrapper[5010]: I1126 15:42:14.894450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-56fnh" event={"ID":"5a87f5af-beea-4084-8351-4d333378baf8","Type":"ContainerStarted","Data":"607940fd9ef82207a1878b3e66132969c8bf17875059de4bd55aa362386ab036"} Nov 26 15:42:18 crc kubenswrapper[5010]: I1126 15:42:18.962299 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-56fnh" podStartSLOduration=7.051883207 podStartE2EDuration="12.962267598s" podCreationTimestamp="2025-11-26 15:42:06 +0000 UTC" firstStartedPulling="2025-11-26 15:42:06.802940075 +0000 UTC m=+947.593657223" lastFinishedPulling="2025-11-26 15:42:12.713324426 +0000 UTC m=+953.504041614" observedRunningTime="2025-11-26 15:42:18.956479354 +0000 UTC m=+959.747196592" watchObservedRunningTime="2025-11-26 15:42:18.962267598 +0000 UTC m=+959.752984756" Nov 26 15:42:26 crc kubenswrapper[5010]: I1126 15:42:26.541929 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:26 crc kubenswrapper[5010]: I1126 15:42:26.543194 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:26 crc kubenswrapper[5010]: I1126 15:42:26.587782 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:27 crc kubenswrapper[5010]: I1126 15:42:27.036085 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-56fnh" Nov 26 15:42:31 crc kubenswrapper[5010]: I1126 15:42:31.982648 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5"] Nov 26 15:42:31 crc kubenswrapper[5010]: I1126 15:42:31.985179 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:31 crc kubenswrapper[5010]: I1126 15:42:31.988619 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-pjv4m" Nov 26 15:42:31 crc kubenswrapper[5010]: I1126 15:42:31.999437 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5"] Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.111865 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxx98\" (UniqueName: \"kubernetes.io/projected/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-kube-api-access-nxx98\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.112083 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-util\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.112159 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-bundle\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.213498 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxx98\" (UniqueName: \"kubernetes.io/projected/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-kube-api-access-nxx98\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.214161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-util\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.214252 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-bundle\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.215039 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-bundle\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.215289 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-util\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.240962 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxx98\" (UniqueName: \"kubernetes.io/projected/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-kube-api-access-nxx98\") pod \"3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.321004 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:32 crc kubenswrapper[5010]: I1126 15:42:32.589231 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5"] Nov 26 15:42:33 crc kubenswrapper[5010]: I1126 15:42:33.055342 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" event={"ID":"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae","Type":"ContainerStarted","Data":"e5999dba19eb486bf5dcc3219d2bbc4d39fb52cbae15c1fdefdafe0dea59b423"} Nov 26 15:42:33 crc kubenswrapper[5010]: E1126 15:42:33.971388 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6d3c5be9_3acf_4cf9_bfda_54ab8d80f3ae.slice/crio-e34e290bcf89cb5fa5d2c383e4dbcc4ff6bbc2b2ded7fbeae31d8edef7dab5e0.scope\": RecentStats: unable to find data in memory cache]" Nov 26 15:42:34 crc kubenswrapper[5010]: I1126 15:42:34.066387 5010 generic.go:334] "Generic (PLEG): container finished" podID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerID="e34e290bcf89cb5fa5d2c383e4dbcc4ff6bbc2b2ded7fbeae31d8edef7dab5e0" exitCode=0 Nov 26 15:42:34 crc kubenswrapper[5010]: I1126 15:42:34.066472 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" event={"ID":"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae","Type":"ContainerDied","Data":"e34e290bcf89cb5fa5d2c383e4dbcc4ff6bbc2b2ded7fbeae31d8edef7dab5e0"} Nov 26 15:42:37 crc kubenswrapper[5010]: I1126 15:42:37.093745 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" event={"ID":"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae","Type":"ContainerStarted","Data":"ed46457d9b72dd49b8862edaf5ba5edce743f7950145f47176e1832a2d7db9fd"} Nov 26 15:42:38 crc kubenswrapper[5010]: I1126 15:42:38.105245 5010 generic.go:334] "Generic (PLEG): container finished" podID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" 
containerID="ed46457d9b72dd49b8862edaf5ba5edce743f7950145f47176e1832a2d7db9fd" exitCode=0 Nov 26 15:42:38 crc kubenswrapper[5010]: I1126 15:42:38.105333 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" event={"ID":"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae","Type":"ContainerDied","Data":"ed46457d9b72dd49b8862edaf5ba5edce743f7950145f47176e1832a2d7db9fd"} Nov 26 15:42:39 crc kubenswrapper[5010]: I1126 15:42:39.117423 5010 generic.go:334] "Generic (PLEG): container finished" podID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerID="14f738551b85a465d578c12fa2beba9e122b7413c7564a3059fbd7969e063567" exitCode=0 Nov 26 15:42:39 crc kubenswrapper[5010]: I1126 15:42:39.117481 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" event={"ID":"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae","Type":"ContainerDied","Data":"14f738551b85a465d578c12fa2beba9e122b7413c7564a3059fbd7969e063567"} Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.481882 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.655632 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxx98\" (UniqueName: \"kubernetes.io/projected/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-kube-api-access-nxx98\") pod \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.655739 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-util\") pod \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.655861 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-bundle\") pod \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\" (UID: \"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae\") " Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.656698 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-bundle" (OuterVolumeSpecName: "bundle") pod "6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" (UID: "6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.663633 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-kube-api-access-nxx98" (OuterVolumeSpecName: "kube-api-access-nxx98") pod "6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" (UID: "6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae"). InnerVolumeSpecName "kube-api-access-nxx98". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.667788 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-util" (OuterVolumeSpecName: "util") pod "6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" (UID: "6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae"). 
InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.757503 5010 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.757551 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxx98\" (UniqueName: \"kubernetes.io/projected/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-kube-api-access-nxx98\") on node \"crc\" DevicePath \"\"" Nov 26 15:42:40 crc kubenswrapper[5010]: I1126 15:42:40.757564 5010 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae-util\") on node \"crc\" DevicePath \"\"" Nov 26 15:42:41 crc kubenswrapper[5010]: I1126 15:42:41.139244 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" event={"ID":"6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae","Type":"ContainerDied","Data":"e5999dba19eb486bf5dcc3219d2bbc4d39fb52cbae15c1fdefdafe0dea59b423"} Nov 26 15:42:41 crc kubenswrapper[5010]: I1126 15:42:41.139322 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5999dba19eb486bf5dcc3219d2bbc4d39fb52cbae15c1fdefdafe0dea59b423" Nov 26 15:42:41 crc kubenswrapper[5010]: I1126 15:42:41.139354 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.985736 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh"] Nov 26 15:42:43 crc kubenswrapper[5010]: E1126 15:42:43.986634 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="pull" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.986653 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="pull" Nov 26 15:42:43 crc kubenswrapper[5010]: E1126 15:42:43.986686 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="util" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.986695 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="util" Nov 26 15:42:43 crc kubenswrapper[5010]: E1126 15:42:43.986733 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="extract" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.986743 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="extract" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.986890 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae" containerName="extract" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.987529 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:42:43 crc kubenswrapper[5010]: I1126 15:42:43.989824 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-8f5cm" Nov 26 15:42:44 crc kubenswrapper[5010]: I1126 15:42:44.033985 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh"] Nov 26 15:42:44 crc kubenswrapper[5010]: I1126 15:42:44.036008 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5qh5\" (UniqueName: \"kubernetes.io/projected/a3bc645d-4358-47cb-9e3b-ebc975c69092-kube-api-access-q5qh5\") pod \"openstack-operator-controller-operator-544fb75865-bd9lh\" (UID: \"a3bc645d-4358-47cb-9e3b-ebc975c69092\") " pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:42:44 crc kubenswrapper[5010]: I1126 15:42:44.137520 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5qh5\" (UniqueName: \"kubernetes.io/projected/a3bc645d-4358-47cb-9e3b-ebc975c69092-kube-api-access-q5qh5\") pod \"openstack-operator-controller-operator-544fb75865-bd9lh\" (UID: \"a3bc645d-4358-47cb-9e3b-ebc975c69092\") " pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:42:44 crc kubenswrapper[5010]: I1126 15:42:44.161094 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5qh5\" (UniqueName: \"kubernetes.io/projected/a3bc645d-4358-47cb-9e3b-ebc975c69092-kube-api-access-q5qh5\") pod \"openstack-operator-controller-operator-544fb75865-bd9lh\" (UID: \"a3bc645d-4358-47cb-9e3b-ebc975c69092\") " pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:42:44 crc kubenswrapper[5010]: I1126 15:42:44.305186 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:42:44 crc kubenswrapper[5010]: I1126 15:42:44.564737 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh"] Nov 26 15:42:45 crc kubenswrapper[5010]: I1126 15:42:45.169317 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" event={"ID":"a3bc645d-4358-47cb-9e3b-ebc975c69092","Type":"ContainerStarted","Data":"b97e9bd0ae3f924ca5a4dfcdad641bed39c8ea5fa51e61561f97bf1dba7809c3"} Nov 26 15:42:51 crc kubenswrapper[5010]: I1126 15:42:51.259265 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" event={"ID":"a3bc645d-4358-47cb-9e3b-ebc975c69092","Type":"ContainerStarted","Data":"ba1e6a30e347fd8cad385f5d5d6a8d57bc5748d5f8906078aeff12ba6567ef19"} Nov 26 15:42:51 crc kubenswrapper[5010]: I1126 15:42:51.260085 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:42:51 crc kubenswrapper[5010]: I1126 15:42:51.314473 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" podStartSLOduration=2.005733729 podStartE2EDuration="8.314451565s" podCreationTimestamp="2025-11-26 15:42:43 +0000 UTC" firstStartedPulling="2025-11-26 15:42:44.581604753 +0000 UTC m=+985.372321911" lastFinishedPulling="2025-11-26 15:42:50.890322589 +0000 UTC m=+991.681039747" observedRunningTime="2025-11-26 15:42:51.309237094 +0000 UTC m=+992.099954252" watchObservedRunningTime="2025-11-26 15:42:51.314451565 +0000 UTC m=+992.105168713" Nov 26 15:43:04 crc kubenswrapper[5010]: I1126 15:43:04.309836 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.540122 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.541818 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.544751 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-9jvtv" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.548051 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.549124 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.551408 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-nbfc9" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.555624 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.581093 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-qmr28"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.582450 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.584523 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-bk9gq" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.595579 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.605113 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-qmr28"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.610422 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.611485 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.613694 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-8bj6n" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.675478 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.701841 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8s4mh\" (UniqueName: \"kubernetes.io/projected/a4bbf592-007c-4176-a6a3-0209b33b6048-kube-api-access-8s4mh\") pod \"designate-operator-controller-manager-955677c94-qmr28\" (UID: \"a4bbf592-007c-4176-a6a3-0209b33b6048\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.702244 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwlb4\" (UniqueName: \"kubernetes.io/projected/6a970d68-d885-4fc2-9d58-508537a42572-kube-api-access-fwlb4\") pod \"cinder-operator-controller-manager-6b7f75547b-sbppr\" (UID: \"6a970d68-d885-4fc2-9d58-508537a42572\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.702278 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j8tl\" (UniqueName: \"kubernetes.io/projected/9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f-kube-api-access-5j8tl\") pod \"barbican-operator-controller-manager-7b64f4fb85-c89k7\" (UID: \"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.702331 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf8h4\" (UniqueName: \"kubernetes.io/projected/b6c13a13-621b-45cb-9830-4dfaf15ee06b-kube-api-access-qf8h4\") pod \"glance-operator-controller-manager-589cbd6b5b-4w8ql\" (UID: \"b6c13a13-621b-45cb-9830-4dfaf15ee06b\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.711003 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-p5446"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.735960 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.741124 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-lklfh" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.745770 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.746925 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.755540 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-twrjt" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.776740 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.793810 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-p5446"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.803771 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8s4mh\" (UniqueName: \"kubernetes.io/projected/a4bbf592-007c-4176-a6a3-0209b33b6048-kube-api-access-8s4mh\") pod \"designate-operator-controller-manager-955677c94-qmr28\" (UID: \"a4bbf592-007c-4176-a6a3-0209b33b6048\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.803828 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwlb4\" (UniqueName: \"kubernetes.io/projected/6a970d68-d885-4fc2-9d58-508537a42572-kube-api-access-fwlb4\") pod \"cinder-operator-controller-manager-6b7f75547b-sbppr\" (UID: \"6a970d68-d885-4fc2-9d58-508537a42572\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.803851 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j8tl\" (UniqueName: \"kubernetes.io/projected/9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f-kube-api-access-5j8tl\") pod \"barbican-operator-controller-manager-7b64f4fb85-c89k7\" (UID: \"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.803879 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf8h4\" (UniqueName: \"kubernetes.io/projected/b6c13a13-621b-45cb-9830-4dfaf15ee06b-kube-api-access-qf8h4\") pod \"glance-operator-controller-manager-589cbd6b5b-4w8ql\" (UID: \"b6c13a13-621b-45cb-9830-4dfaf15ee06b\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.811073 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-sxdct"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.812275 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.819193 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.820369 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.823590 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.825600 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.826515 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.826998 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-dprmt" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.827191 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-69lb8" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.828785 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.849280 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-2w77w" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.851870 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-sxdct"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.863722 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j8tl\" (UniqueName: \"kubernetes.io/projected/9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f-kube-api-access-5j8tl\") pod \"barbican-operator-controller-manager-7b64f4fb85-c89k7\" (UID: \"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f\") " pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.864157 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.869579 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwlb4\" (UniqueName: \"kubernetes.io/projected/6a970d68-d885-4fc2-9d58-508537a42572-kube-api-access-fwlb4\") pod \"cinder-operator-controller-manager-6b7f75547b-sbppr\" (UID: \"6a970d68-d885-4fc2-9d58-508537a42572\") " pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.870046 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8s4mh\" (UniqueName: \"kubernetes.io/projected/a4bbf592-007c-4176-a6a3-0209b33b6048-kube-api-access-8s4mh\") pod \"designate-operator-controller-manager-955677c94-qmr28\" (UID: \"a4bbf592-007c-4176-a6a3-0209b33b6048\") " pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.874887 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.879372 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.879447 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf8h4\" (UniqueName: \"kubernetes.io/projected/b6c13a13-621b-45cb-9830-4dfaf15ee06b-kube-api-access-qf8h4\") pod \"glance-operator-controller-manager-589cbd6b5b-4w8ql\" (UID: \"b6c13a13-621b-45cb-9830-4dfaf15ee06b\") " pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.879853 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.880892 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.886180 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-lvcxf" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.906165 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.907325 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.907894 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.909333 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.919453 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fbs2z\" (UniqueName: \"kubernetes.io/projected/8b2b09a7-2b17-43da-ae0e-4448b96eed50-kube-api-access-fbs2z\") pod \"horizon-operator-controller-manager-5d494799bf-mc96z\" (UID: \"8b2b09a7-2b17-43da-ae0e-4448b96eed50\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.919545 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x4vk\" (UniqueName: \"kubernetes.io/projected/ec8d3bdf-fc89-426b-82e9-a1ae81a3e548-kube-api-access-6x4vk\") pod \"ironic-operator-controller-manager-67cb4dc6d4-dhngn\" (UID: \"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.919692 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.919819 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svgbw\" (UniqueName: \"kubernetes.io/projected/7ec0a644-00e0-4b67-b2ad-7a7128dcaf19-kube-api-access-svgbw\") pod \"heat-operator-controller-manager-5b77f656f-p5446\" (UID: \"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.919853 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2gzh\" (UniqueName: \"kubernetes.io/projected/ce1fedbc-31da-4c37-9731-34e79ab604f4-kube-api-access-z2gzh\") pod \"keystone-operator-controller-manager-7b4567c7cf-9lx7h\" (UID: \"ce1fedbc-31da-4c37-9731-34e79ab604f4\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.919873 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkbgj\" (UniqueName: \"kubernetes.io/projected/93625d2a-6f36-43a8-b26c-8f6506955b15-kube-api-access-zkbgj\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.926045 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-94p6q" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.933868 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2"] Nov 26 15:43:24 crc 
kubenswrapper[5010]: I1126 15:43:24.933923 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.934017 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.944959 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.957665 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.958756 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.959529 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.960232 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.960325 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.961018 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-bxffz" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.963512 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-hww96" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.975314 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.975572 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-42sx8" Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.989437 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj"] Nov 26 15:43:24 crc kubenswrapper[5010]: I1126 15:43:24.999823 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.001835 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.007130 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.009808 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.010117 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-8g267" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026444 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fbs2z\" (UniqueName: \"kubernetes.io/projected/8b2b09a7-2b17-43da-ae0e-4448b96eed50-kube-api-access-fbs2z\") pod \"horizon-operator-controller-manager-5d494799bf-mc96z\" (UID: \"8b2b09a7-2b17-43da-ae0e-4448b96eed50\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026545 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x4vk\" (UniqueName: \"kubernetes.io/projected/ec8d3bdf-fc89-426b-82e9-a1ae81a3e548-kube-api-access-6x4vk\") pod \"ironic-operator-controller-manager-67cb4dc6d4-dhngn\" (UID: \"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026626 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026656 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrhkh\" (UniqueName: \"kubernetes.io/projected/7e5769c2-7f83-41ff-9365-7f5792e8d81b-kube-api-access-vrhkh\") pod \"manila-operator-controller-manager-5d499bf58b-k7vx2\" (UID: \"7e5769c2-7f83-41ff-9365-7f5792e8d81b\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026720 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svgbw\" (UniqueName: \"kubernetes.io/projected/7ec0a644-00e0-4b67-b2ad-7a7128dcaf19-kube-api-access-svgbw\") pod \"heat-operator-controller-manager-5b77f656f-p5446\" (UID: \"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026746 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwj8q\" (UniqueName: \"kubernetes.io/projected/dfb4a15b-a139-4778-acc7-f236e947ca96-kube-api-access-rwj8q\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-sj6tg\" (UID: \"dfb4a15b-a139-4778-acc7-f236e947ca96\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 
15:43:25.026786 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2gzh\" (UniqueName: \"kubernetes.io/projected/ce1fedbc-31da-4c37-9731-34e79ab604f4-kube-api-access-z2gzh\") pod \"keystone-operator-controller-manager-7b4567c7cf-9lx7h\" (UID: \"ce1fedbc-31da-4c37-9731-34e79ab604f4\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026815 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkbgj\" (UniqueName: \"kubernetes.io/projected/93625d2a-6f36-43a8-b26c-8f6506955b15-kube-api-access-zkbgj\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.026907 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw4lx\" (UniqueName: \"kubernetes.io/projected/191eef94-8fdf-4180-8ce0-1d62fc3f0de0-kube-api-access-bw4lx\") pod \"neutron-operator-controller-manager-6fdcddb789-f64fd\" (UID: \"191eef94-8fdf-4180-8ce0-1d62fc3f0de0\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.028342 5010 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.028393 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert podName:93625d2a-6f36-43a8-b26c-8f6506955b15 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:25.528375565 +0000 UTC m=+1026.319092713 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert") pod "infra-operator-controller-manager-57548d458d-sxdct" (UID: "93625d2a-6f36-43a8-b26c-8f6506955b15") : secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.046878 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.048224 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.049876 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fbs2z\" (UniqueName: \"kubernetes.io/projected/8b2b09a7-2b17-43da-ae0e-4448b96eed50-kube-api-access-fbs2z\") pod \"horizon-operator-controller-manager-5d494799bf-mc96z\" (UID: \"8b2b09a7-2b17-43da-ae0e-4448b96eed50\") " pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.055297 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.062617 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-gp9n4" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.064089 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.077330 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkbgj\" (UniqueName: \"kubernetes.io/projected/93625d2a-6f36-43a8-b26c-8f6506955b15-kube-api-access-zkbgj\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.078622 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-694mw" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.082120 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.082892 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2gzh\" (UniqueName: \"kubernetes.io/projected/ce1fedbc-31da-4c37-9731-34e79ab604f4-kube-api-access-z2gzh\") pod \"keystone-operator-controller-manager-7b4567c7cf-9lx7h\" (UID: \"ce1fedbc-31da-4c37-9731-34e79ab604f4\") " pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.097560 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x4vk\" (UniqueName: \"kubernetes.io/projected/ec8d3bdf-fc89-426b-82e9-a1ae81a3e548-kube-api-access-6x4vk\") pod \"ironic-operator-controller-manager-67cb4dc6d4-dhngn\" (UID: \"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548\") " pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.098091 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svgbw\" (UniqueName: \"kubernetes.io/projected/7ec0a644-00e0-4b67-b2ad-7a7128dcaf19-kube-api-access-svgbw\") pod \"heat-operator-controller-manager-5b77f656f-p5446\" (UID: \"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19\") " pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.106448 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.111013 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.114417 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.122149 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.131088 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw4lx\" (UniqueName: \"kubernetes.io/projected/191eef94-8fdf-4180-8ce0-1d62fc3f0de0-kube-api-access-bw4lx\") pod \"neutron-operator-controller-manager-6fdcddb789-f64fd\" (UID: \"191eef94-8fdf-4180-8ce0-1d62fc3f0de0\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.142378 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nkbq\" (UniqueName: \"kubernetes.io/projected/b4799b0e-11ed-4331-84d1-daf581d00bbe-kube-api-access-2nkbq\") pod \"octavia-operator-controller-manager-64cdc6ff96-fx8tr\" (UID: \"b4799b0e-11ed-4331-84d1-daf581d00bbe\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.142596 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pd6fv\" (UniqueName: \"kubernetes.io/projected/b0d7107e-a617-4a7b-a6e3-0267996965ef-kube-api-access-pd6fv\") pod \"ovn-operator-controller-manager-56897c768d-gcj9h\" (UID: \"b0d7107e-a617-4a7b-a6e3-0267996965ef\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.142826 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zrc5\" (UniqueName: \"kubernetes.io/projected/3daf5f1d-5d15-4b93-ac0b-8209060a0557-kube-api-access-6zrc5\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.143067 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmmlm\" (UniqueName: \"kubernetes.io/projected/05194bfa-88c3-4826-8a59-6d62252e4b1a-kube-api-access-rmmlm\") pod \"nova-operator-controller-manager-79556f57fc-5llrj\" (UID: \"05194bfa-88c3-4826-8a59-6d62252e4b1a\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.143215 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.143392 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrhkh\" (UniqueName: \"kubernetes.io/projected/7e5769c2-7f83-41ff-9365-7f5792e8d81b-kube-api-access-vrhkh\") pod \"manila-operator-controller-manager-5d499bf58b-k7vx2\" (UID: \"7e5769c2-7f83-41ff-9365-7f5792e8d81b\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.143521 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwj8q\" (UniqueName: \"kubernetes.io/projected/dfb4a15b-a139-4778-acc7-f236e947ca96-kube-api-access-rwj8q\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-sj6tg\" (UID: \"dfb4a15b-a139-4778-acc7-f236e947ca96\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.183275 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrhkh\" (UniqueName: \"kubernetes.io/projected/7e5769c2-7f83-41ff-9365-7f5792e8d81b-kube-api-access-vrhkh\") pod \"manila-operator-controller-manager-5d499bf58b-k7vx2\" (UID: \"7e5769c2-7f83-41ff-9365-7f5792e8d81b\") " pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.186443 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw4lx\" (UniqueName: \"kubernetes.io/projected/191eef94-8fdf-4180-8ce0-1d62fc3f0de0-kube-api-access-bw4lx\") pod \"neutron-operator-controller-manager-6fdcddb789-f64fd\" (UID: \"191eef94-8fdf-4180-8ce0-1d62fc3f0de0\") " pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.206842 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwj8q\" (UniqueName: \"kubernetes.io/projected/dfb4a15b-a139-4778-acc7-f236e947ca96-kube-api-access-rwj8q\") pod \"mariadb-operator-controller-manager-66f4dd4bc7-sj6tg\" (UID: \"dfb4a15b-a139-4778-acc7-f236e947ca96\") " pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.218322 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.248565 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.250605 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmmlm\" (UniqueName: \"kubernetes.io/projected/05194bfa-88c3-4826-8a59-6d62252e4b1a-kube-api-access-rmmlm\") pod \"nova-operator-controller-manager-79556f57fc-5llrj\" (UID: \"05194bfa-88c3-4826-8a59-6d62252e4b1a\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.250656 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.250998 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nkbq\" (UniqueName: \"kubernetes.io/projected/b4799b0e-11ed-4331-84d1-daf581d00bbe-kube-api-access-2nkbq\") pod \"octavia-operator-controller-manager-64cdc6ff96-fx8tr\" (UID: \"b4799b0e-11ed-4331-84d1-daf581d00bbe\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.251020 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pd6fv\" (UniqueName: \"kubernetes.io/projected/b0d7107e-a617-4a7b-a6e3-0267996965ef-kube-api-access-pd6fv\") pod \"ovn-operator-controller-manager-56897c768d-gcj9h\" (UID: \"b0d7107e-a617-4a7b-a6e3-0267996965ef\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.251095 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zrc5\" (UniqueName: \"kubernetes.io/projected/3daf5f1d-5d15-4b93-ac0b-8209060a0557-kube-api-access-6zrc5\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.251151 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzw4r\" (UniqueName: \"kubernetes.io/projected/bf155072-f786-47eb-9455-f807444d12e9-kube-api-access-pzw4r\") pod \"placement-operator-controller-manager-57988cc5b5-zq8vc\" (UID: \"bf155072-f786-47eb-9455-f807444d12e9\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.251771 5010 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.251822 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert podName:3daf5f1d-5d15-4b93-ac0b-8209060a0557 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:25.751806558 +0000 UTC m=+1026.542523706 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" (UID: "3daf5f1d-5d15-4b93-ac0b-8209060a0557") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.257130 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-nfl24"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.259466 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.265564 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-j5hnm" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.275612 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.277066 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.283203 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nkbq\" (UniqueName: \"kubernetes.io/projected/b4799b0e-11ed-4331-84d1-daf581d00bbe-kube-api-access-2nkbq\") pod \"octavia-operator-controller-manager-64cdc6ff96-fx8tr\" (UID: \"b4799b0e-11ed-4331-84d1-daf581d00bbe\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.283523 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zrc5\" (UniqueName: \"kubernetes.io/projected/3daf5f1d-5d15-4b93-ac0b-8209060a0557-kube-api-access-6zrc5\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.283755 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmmlm\" (UniqueName: \"kubernetes.io/projected/05194bfa-88c3-4826-8a59-6d62252e4b1a-kube-api-access-rmmlm\") pod \"nova-operator-controller-manager-79556f57fc-5llrj\" (UID: \"05194bfa-88c3-4826-8a59-6d62252e4b1a\") " pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.285736 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pd6fv\" (UniqueName: \"kubernetes.io/projected/b0d7107e-a617-4a7b-a6e3-0267996965ef-kube-api-access-pd6fv\") pod \"ovn-operator-controller-manager-56897c768d-gcj9h\" (UID: \"b0d7107e-a617-4a7b-a6e3-0267996965ef\") " pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.295365 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2mmwj" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.310891 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/swift-operator-controller-manager-d77b94747-nfl24"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.342093 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.346023 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.347387 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.358258 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-wl5st" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.373262 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzw4r\" (UniqueName: \"kubernetes.io/projected/bf155072-f786-47eb-9455-f807444d12e9-kube-api-access-pzw4r\") pod \"placement-operator-controller-manager-57988cc5b5-zq8vc\" (UID: \"bf155072-f786-47eb-9455-f807444d12e9\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.374373 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnjml\" (UniqueName: \"kubernetes.io/projected/82a45cae-9275-4f6a-8807-1ed1c97da89e-kube-api-access-xnjml\") pod \"swift-operator-controller-manager-d77b94747-nfl24\" (UID: \"82a45cae-9275-4f6a-8807-1ed1c97da89e\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.386144 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.413796 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.434779 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.436065 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.456306 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-g7kfh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.458802 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzw4r\" (UniqueName: \"kubernetes.io/projected/bf155072-f786-47eb-9455-f807444d12e9-kube-api-access-pzw4r\") pod \"placement-operator-controller-manager-57988cc5b5-zq8vc\" (UID: \"bf155072-f786-47eb-9455-f807444d12e9\") " pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.486058 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnjml\" (UniqueName: \"kubernetes.io/projected/82a45cae-9275-4f6a-8807-1ed1c97da89e-kube-api-access-xnjml\") pod \"swift-operator-controller-manager-d77b94747-nfl24\" (UID: \"82a45cae-9275-4f6a-8807-1ed1c97da89e\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.486148 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrz9c\" (UniqueName: \"kubernetes.io/projected/01236c17-da54-4428-9e82-9a3b0165d6fc-kube-api-access-rrz9c\") pod \"telemetry-operator-controller-manager-76cc84c6bb-zrldc\" (UID: \"01236c17-da54-4428-9e82-9a3b0165d6fc\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.486177 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxnwt\" (UniqueName: \"kubernetes.io/projected/1ff0a07f-935b-493a-a18a-a449232dc185-kube-api-access-sxnwt\") pod \"test-operator-controller-manager-5cd6c7f4c8-xmltd\" (UID: \"1ff0a07f-935b-493a-a18a-a449232dc185\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.488259 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.493166 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.559832 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnjml\" (UniqueName: \"kubernetes.io/projected/82a45cae-9275-4f6a-8807-1ed1c97da89e-kube-api-access-xnjml\") pod \"swift-operator-controller-manager-d77b94747-nfl24\" (UID: \"82a45cae-9275-4f6a-8807-1ed1c97da89e\") " pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.580768 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.582025 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.585118 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.586282 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-wcfnk" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.587203 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrz9c\" (UniqueName: \"kubernetes.io/projected/01236c17-da54-4428-9e82-9a3b0165d6fc-kube-api-access-rrz9c\") pod \"telemetry-operator-controller-manager-76cc84c6bb-zrldc\" (UID: \"01236c17-da54-4428-9e82-9a3b0165d6fc\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.587252 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxnwt\" (UniqueName: \"kubernetes.io/projected/1ff0a07f-935b-493a-a18a-a449232dc185-kube-api-access-sxnwt\") pod \"test-operator-controller-manager-5cd6c7f4c8-xmltd\" (UID: \"1ff0a07f-935b-493a-a18a-a449232dc185\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.587359 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.587415 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp2bp\" (UniqueName: \"kubernetes.io/projected/522c2ed1-a470-4885-88fc-395ed7834b23-kube-api-access-rp2bp\") pod \"watcher-operator-controller-manager-656dcb59d4-bdtsk\" (UID: \"522c2ed1-a470-4885-88fc-395ed7834b23\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.587895 5010 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.588000 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert podName:93625d2a-6f36-43a8-b26c-8f6506955b15 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:26.587972294 +0000 UTC m=+1027.378689592 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert") pod "infra-operator-controller-manager-57548d458d-sxdct" (UID: "93625d2a-6f36-43a8-b26c-8f6506955b15") : secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.600345 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.601250 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.620071 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.621070 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.621556 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxnwt\" (UniqueName: \"kubernetes.io/projected/1ff0a07f-935b-493a-a18a-a449232dc185-kube-api-access-sxnwt\") pod \"test-operator-controller-manager-5cd6c7f4c8-xmltd\" (UID: \"1ff0a07f-935b-493a-a18a-a449232dc185\") " pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.627119 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-j2dzs" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.628353 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.646906 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrz9c\" (UniqueName: \"kubernetes.io/projected/01236c17-da54-4428-9e82-9a3b0165d6fc-kube-api-access-rrz9c\") pod \"telemetry-operator-controller-manager-76cc84c6bb-zrldc\" (UID: \"01236c17-da54-4428-9e82-9a3b0165d6fc\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.652591 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.676833 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.689892 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.690408 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp2bp\" (UniqueName: \"kubernetes.io/projected/522c2ed1-a470-4885-88fc-395ed7834b23-kube-api-access-rp2bp\") pod \"watcher-operator-controller-manager-656dcb59d4-bdtsk\" (UID: \"522c2ed1-a470-4885-88fc-395ed7834b23\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.690527 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.690635 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8vzj\" (UniqueName: \"kubernetes.io/projected/1b523418-d938-4ba7-8788-b93b382429e3-kube-api-access-n8vzj\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.709232 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.727927 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp2bp\" (UniqueName: \"kubernetes.io/projected/522c2ed1-a470-4885-88fc-395ed7834b23-kube-api-access-rp2bp\") pod \"watcher-operator-controller-manager-656dcb59d4-bdtsk\" (UID: \"522c2ed1-a470-4885-88fc-395ed7834b23\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.747010 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.760689 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.774176 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.792491 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.792633 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8vzj\" (UniqueName: \"kubernetes.io/projected/1b523418-d938-4ba7-8788-b93b382429e3-kube-api-access-n8vzj\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.792997 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.793002 5010 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.793046 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.793082 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:26.293059587 +0000 UTC m=+1027.083776965 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "metrics-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.793195 5010 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.793255 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert podName:3daf5f1d-5d15-4b93-ac0b-8209060a0557 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:26.793238151 +0000 UTC m=+1027.583955299 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" (UID: "3daf5f1d-5d15-4b93-ac0b-8209060a0557") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.793278 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88tjm\" (UniqueName: \"kubernetes.io/projected/cdfa6310-b994-49ba-8e89-dc6584a65314-kube-api-access-88tjm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-h9gnm\" (UID: \"cdfa6310-b994-49ba-8e89-dc6584a65314\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.793338 5010 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: E1126 15:43:25.793410 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:26.293388405 +0000 UTC m=+1027.084105783 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "webhook-server-cert" not found Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.795970 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.831198 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.836046 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8vzj\" (UniqueName: \"kubernetes.io/projected/1b523418-d938-4ba7-8788-b93b382429e3-kube-api-access-n8vzj\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.894002 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88tjm\" (UniqueName: \"kubernetes.io/projected/cdfa6310-b994-49ba-8e89-dc6584a65314-kube-api-access-88tjm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-h9gnm\" (UID: \"cdfa6310-b994-49ba-8e89-dc6584a65314\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.919918 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88tjm\" (UniqueName: \"kubernetes.io/projected/cdfa6310-b994-49ba-8e89-dc6584a65314-kube-api-access-88tjm\") pod \"rabbitmq-cluster-operator-manager-668c99d594-h9gnm\" (UID: \"cdfa6310-b994-49ba-8e89-dc6584a65314\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.933904 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-955677c94-qmr28"] Nov 26 15:43:25 crc kubenswrapper[5010]: I1126 15:43:25.996749 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.096949 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.286403 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.301489 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.301584 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.301621 5010 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.301674 5010 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.301702 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:27.301684854 +0000 UTC m=+1028.092402002 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "webhook-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.302118 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:27.302109625 +0000 UTC m=+1028.092826773 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "metrics-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: W1126 15:43:26.319794 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9e0c2ada_ac2c_4fc8_b786_2a62f0458c2f.slice/crio-abd868327bf4668539c1ecb491ed6ed04a7803b52270aef2903c94ff180fb6e9 WatchSource:0}: Error finding container abd868327bf4668539c1ecb491ed6ed04a7803b52270aef2903c94ff180fb6e9: Status 404 returned error can't find the container with id abd868327bf4668539c1ecb491ed6ed04a7803b52270aef2903c94ff180fb6e9 Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.550098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" event={"ID":"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f","Type":"ContainerStarted","Data":"abd868327bf4668539c1ecb491ed6ed04a7803b52270aef2903c94ff180fb6e9"} Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.553906 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" event={"ID":"a4bbf592-007c-4176-a6a3-0209b33b6048","Type":"ContainerStarted","Data":"36455de0cac10eae152b5163c677e6ebba7c022861f89d3894c8b3af868aba22"} Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.572335 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.605077 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.605301 5010 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.605353 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert podName:93625d2a-6f36-43a8-b26c-8f6506955b15 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:28.605337413 +0000 UTC m=+1029.396054551 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert") pod "infra-operator-controller-manager-57548d458d-sxdct" (UID: "93625d2a-6f36-43a8-b26c-8f6506955b15") : secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.614762 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.624365 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.633251 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.640959 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.741614 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj"] Nov 26 15:43:26 crc kubenswrapper[5010]: W1126 15:43:26.747522 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod191eef94_8fdf_4180_8ce0_1d62fc3f0de0.slice/crio-9152fbe6b88b5d8fc57cf54689c1e04b04f44f68a4eeeb084bcd3b7b05809831 WatchSource:0}: Error finding container 9152fbe6b88b5d8fc57cf54689c1e04b04f44f68a4eeeb084bcd3b7b05809831: Status 404 returned error can't find the container with id 9152fbe6b88b5d8fc57cf54689c1e04b04f44f68a4eeeb084bcd3b7b05809831 Nov 26 15:43:26 crc kubenswrapper[5010]: W1126 15:43:26.750872 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod05194bfa_88c3_4826_8a59_6d62252e4b1a.slice/crio-30be77157a907b633a2ee8498856aea9dcf13551ae363c27e2687916d8fc43a4 WatchSource:0}: Error finding container 30be77157a907b633a2ee8498856aea9dcf13551ae363c27e2687916d8fc43a4: Status 404 returned error can't find the container with id 30be77157a907b633a2ee8498856aea9dcf13551ae363c27e2687916d8fc43a4 Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.753164 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2"] Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.766511 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-svgbw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-p5446_openstack-operators(7ec0a644-00e0-4b67-b2ad-7a7128dcaf19): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:26 crc kubenswrapper[5010]: W1126 15:43:26.767537 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb4799b0e_11ed_4331_84d1_daf581d00bbe.slice/crio-0687c7175d52555e42cbc6f5c1d592968152808441ee3b5a6cff775c06e6cad9 WatchSource:0}: Error finding container 0687c7175d52555e42cbc6f5c1d592968152808441ee3b5a6cff775c06e6cad9: Status 404 returned error can't find the container with id 0687c7175d52555e42cbc6f5c1d592968152808441ee3b5a6cff775c06e6cad9 Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.767589 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h"] Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.768756 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-svgbw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5b77f656f-p5446_openstack-operators(7ec0a644-00e0-4b67-b2ad-7a7128dcaf19): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.770007 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" podUID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.776082 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd"] Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.777073 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2nkbq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.778978 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2nkbq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.780128 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.783247 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5b77f656f-p5446"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.793086 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z"] Nov 26 15:43:26 crc 
kubenswrapper[5010]: I1126 15:43:26.799313 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr"] Nov 26 15:43:26 crc kubenswrapper[5010]: I1126 15:43:26.808378 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.808816 5010 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:26 crc kubenswrapper[5010]: E1126 15:43:26.808920 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert podName:3daf5f1d-5d15-4b93-ac0b-8209060a0557 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:28.808898217 +0000 UTC m=+1029.599615425 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" (UID: "3daf5f1d-5d15-4b93-ac0b-8209060a0557") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.106083 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm"] Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.124443 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd"] Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.131013 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc"] Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.152368 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-d77b94747-nfl24"] Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.163750 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk"] Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.168369 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc"] Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.174079 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xnjml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-nfl24_openstack-operators(82a45cae-9275-4f6a-8807-1ed1c97da89e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.180911 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xnjml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-d77b94747-nfl24_openstack-operators(82a45cae-9275-4f6a-8807-1ed1c97da89e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.182120 5010 pod_workers.go:1301] "Error 
syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" podUID="82a45cae-9275-4f6a-8807-1ed1c97da89e" Nov 26 15:43:27 crc kubenswrapper[5010]: W1126 15:43:27.185249 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod522c2ed1_a470_4885_88fc_395ed7834b23.slice/crio-a56965087d7976c8ac435dc2243a340bebe8feaf2a3b2432f96e2f90ef2c6378 WatchSource:0}: Error finding container a56965087d7976c8ac435dc2243a340bebe8feaf2a3b2432f96e2f90ef2c6378: Status 404 returned error can't find the container with id a56965087d7976c8ac435dc2243a340bebe8feaf2a3b2432f96e2f90ef2c6378 Nov 26 15:43:27 crc kubenswrapper[5010]: W1126 15:43:27.185613 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbf155072_f786_47eb_9455_f807444d12e9.slice/crio-827422a722c35d39ffed94e54490bdfa3d07129d6e6d80d97b195ced1f308d20 WatchSource:0}: Error finding container 827422a722c35d39ffed94e54490bdfa3d07129d6e6d80d97b195ced1f308d20: Status 404 returned error can't find the container with id 827422a722c35d39ffed94e54490bdfa3d07129d6e6d80d97b195ced1f308d20 Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.189155 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sxnwt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-xmltd_openstack-operators(1ff0a07f-935b-493a-a18a-a449232dc185): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.192393 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:225958f250a1075b69439d776a13acc45c78695c21abda23600fb53ca1640423,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pzw4r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-zq8vc_openstack-operators(bf155072-f786-47eb-9455-f807444d12e9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc 
kubenswrapper[5010]: E1126 15:43:27.192564 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sxnwt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5cd6c7f4c8-xmltd_openstack-operators(1ff0a07f-935b-493a-a18a-a449232dc185): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.193812 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" podUID="1ff0a07f-935b-493a-a18a-a449232dc185" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.193879 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rp2bp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-bdtsk_openstack-operators(522c2ed1-a470-4885-88fc-395ed7834b23): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.200272 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rp2bp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-bdtsk_openstack-operators(522c2ed1-a470-4885-88fc-395ed7834b23): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.202411 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" podUID="522c2ed1-a470-4885-88fc-395ed7834b23" Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.317482 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.317612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.317617 5010 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.317694 5010 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.317758 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:29.317737471 +0000 UTC m=+1030.108454619 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "webhook-server-cert" not found Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.317778 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:29.317769992 +0000 UTC m=+1030.108487140 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "metrics-server-cert" not found Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.569510 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" event={"ID":"ce1fedbc-31da-4c37-9731-34e79ab604f4","Type":"ContainerStarted","Data":"6fc71ad72520ba99bd62ac43067c088e50c47883354c59b05e45eb7d349fc23c"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.571053 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerStarted","Data":"0687c7175d52555e42cbc6f5c1d592968152808441ee3b5a6cff775c06e6cad9"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.572338 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" event={"ID":"191eef94-8fdf-4180-8ce0-1d62fc3f0de0","Type":"ContainerStarted","Data":"9152fbe6b88b5d8fc57cf54689c1e04b04f44f68a4eeeb084bcd3b7b05809831"} Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.573328 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.574310 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" event={"ID":"bf155072-f786-47eb-9455-f807444d12e9","Type":"ContainerStarted","Data":"827422a722c35d39ffed94e54490bdfa3d07129d6e6d80d97b195ced1f308d20"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.576731 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" event={"ID":"01236c17-da54-4428-9e82-9a3b0165d6fc","Type":"ContainerStarted","Data":"3c3dd06fe24f14ffeee1bf035786e9a94ebf6e270acf173563d59a6eb9f96fc4"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.579180 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" event={"ID":"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19","Type":"ContainerStarted","Data":"e499aa329578817325607d2b4a70ec0516ca253e985aba61bb30c8ee8e0b5fa3"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.580508 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" event={"ID":"6a970d68-d885-4fc2-9d58-508537a42572","Type":"ContainerStarted","Data":"be194998ee31a3e2d73b6cde8ce180f703e64e82e6901b8f859cdc4f1944f90d"} Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.595107 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" 
with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" podUID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.597988 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" event={"ID":"7e5769c2-7f83-41ff-9365-7f5792e8d81b","Type":"ContainerStarted","Data":"425646cfad8a37dd5178e2cffea25799a9847b676ec49a447632db1165405e15"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.604488 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" event={"ID":"05194bfa-88c3-4826-8a59-6d62252e4b1a","Type":"ContainerStarted","Data":"30be77157a907b633a2ee8498856aea9dcf13551ae363c27e2687916d8fc43a4"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.613702 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" event={"ID":"cdfa6310-b994-49ba-8e89-dc6584a65314","Type":"ContainerStarted","Data":"86957d7ffb49a1b72124f7a7d858b55366828ead939a03d11d15559cf0bec08a"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.619357 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" event={"ID":"522c2ed1-a470-4885-88fc-395ed7834b23","Type":"ContainerStarted","Data":"a56965087d7976c8ac435dc2243a340bebe8feaf2a3b2432f96e2f90ef2c6378"} Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.624059 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" podUID="522c2ed1-a470-4885-88fc-395ed7834b23" Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.624879 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" event={"ID":"b0d7107e-a617-4a7b-a6e3-0267996965ef","Type":"ContainerStarted","Data":"75fe1b3d7890c6a024b0d3dfc655de529054c9ff909ec7ffec21643e5fb0e2c7"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.653962 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" event={"ID":"b6c13a13-621b-45cb-9830-4dfaf15ee06b","Type":"ContainerStarted","Data":"aaba35728905130ef92181b5f84d3c887542565b40a795e1a7445089e9092501"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.655552 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" event={"ID":"1ff0a07f-935b-493a-a18a-a449232dc185","Type":"ContainerStarted","Data":"27946269164ca87e8580eb4e5d694329f886bc7382bab3ee1c8a62d1123b02a9"} Nov 26 
15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.657881 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerStarted","Data":"641a19b6d4c64d44f3b7a636770fba713bd5e39ddc009150ea14252dee5e1d71"} Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.658523 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" podUID="1ff0a07f-935b-493a-a18a-a449232dc185" Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.660470 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" event={"ID":"dfb4a15b-a139-4778-acc7-f236e947ca96","Type":"ContainerStarted","Data":"340f5d86647483ed72e47b00daa6ae6fdb4c7287340acf688655ba9c62f139bc"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.663001 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" event={"ID":"8b2b09a7-2b17-43da-ae0e-4448b96eed50","Type":"ContainerStarted","Data":"c41b37420274535b5478e32f0871808004d8f602958f51658ef22aa0446a33ca"} Nov 26 15:43:27 crc kubenswrapper[5010]: I1126 15:43:27.663994 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" event={"ID":"82a45cae-9275-4f6a-8807-1ed1c97da89e","Type":"ContainerStarted","Data":"9878a0b825238de696ac49d5b0f5986aa90a62209e7fff9e2ea3096372c31723"} Nov 26 15:43:27 crc kubenswrapper[5010]: E1126 15:43:27.669601 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" podUID="82a45cae-9275-4f6a-8807-1ed1c97da89e" Nov 26 15:43:28 crc kubenswrapper[5010]: I1126 15:43:28.640402 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.640573 5010 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.640639 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert podName:93625d2a-6f36-43a8-b26c-8f6506955b15 nodeName:}" failed. 
No retries permitted until 2025-11-26 15:43:32.640620607 +0000 UTC m=+1033.431337745 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert") pod "infra-operator-controller-manager-57548d458d-sxdct" (UID: "93625d2a-6f36-43a8-b26c-8f6506955b15") : secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.687434 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2ee37ff474bee3203447df4f326a9279a515e770573153338296dd074722c677\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" podUID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.687836 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" podUID="522c2ed1-a470-4885-88fc-395ed7834b23" Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.688090 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:ddc8a82f05930db8ee7a8d6d189b5a66373060656e4baf71ac302f89c477da4c\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.688166 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:72236301580ff9080f7e311b832d7ba66666a9afeda51f969745229624ff26e4\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" podUID="82a45cae-9275-4f6a-8807-1ed1c97da89e" Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.688356 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:210517b918e30df1c95fc7d961c8e57e9a9d1cc2b9fe7eb4dad2034dd53a90aa\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" 
podUID="1ff0a07f-935b-493a-a18a-a449232dc185" Nov 26 15:43:28 crc kubenswrapper[5010]: I1126 15:43:28.852170 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.852383 5010 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:28 crc kubenswrapper[5010]: E1126 15:43:28.852565 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert podName:3daf5f1d-5d15-4b93-ac0b-8209060a0557 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:32.852453439 +0000 UTC m=+1033.643170587 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" (UID: "3daf5f1d-5d15-4b93-ac0b-8209060a0557") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:29 crc kubenswrapper[5010]: I1126 15:43:29.365323 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:29 crc kubenswrapper[5010]: I1126 15:43:29.365864 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:29 crc kubenswrapper[5010]: E1126 15:43:29.366008 5010 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 15:43:29 crc kubenswrapper[5010]: E1126 15:43:29.366072 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:33.366052433 +0000 UTC m=+1034.156769581 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "metrics-server-cert" not found Nov 26 15:43:29 crc kubenswrapper[5010]: E1126 15:43:29.366475 5010 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 15:43:29 crc kubenswrapper[5010]: E1126 15:43:29.366512 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:33.366502594 +0000 UTC m=+1034.157219742 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "webhook-server-cert" not found Nov 26 15:43:32 crc kubenswrapper[5010]: I1126 15:43:32.650684 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:32 crc kubenswrapper[5010]: E1126 15:43:32.651059 5010 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:32 crc kubenswrapper[5010]: E1126 15:43:32.651177 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert podName:93625d2a-6f36-43a8-b26c-8f6506955b15 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:40.651150986 +0000 UTC m=+1041.441868324 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert") pod "infra-operator-controller-manager-57548d458d-sxdct" (UID: "93625d2a-6f36-43a8-b26c-8f6506955b15") : secret "infra-operator-webhook-server-cert" not found Nov 26 15:43:32 crc kubenswrapper[5010]: I1126 15:43:32.854187 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:32 crc kubenswrapper[5010]: E1126 15:43:32.854406 5010 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:32 crc kubenswrapper[5010]: E1126 15:43:32.854499 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert podName:3daf5f1d-5d15-4b93-ac0b-8209060a0557 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:40.854474074 +0000 UTC m=+1041.645191382 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert") pod "openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" (UID: "3daf5f1d-5d15-4b93-ac0b-8209060a0557") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 26 15:43:33 crc kubenswrapper[5010]: I1126 15:43:33.464345 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:33 crc kubenswrapper[5010]: I1126 15:43:33.464509 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:33 crc kubenswrapper[5010]: E1126 15:43:33.464679 5010 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 26 15:43:33 crc kubenswrapper[5010]: E1126 15:43:33.464727 5010 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 26 15:43:33 crc kubenswrapper[5010]: E1126 15:43:33.464837 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:41.464806587 +0000 UTC m=+1042.255523815 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "metrics-server-cert" not found Nov 26 15:43:33 crc kubenswrapper[5010]: E1126 15:43:33.464941 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs podName:1b523418-d938-4ba7-8788-b93b382429e3 nodeName:}" failed. No retries permitted until 2025-11-26 15:43:41.464917849 +0000 UTC m=+1042.255635078 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs") pod "openstack-operator-controller-manager-659d75f7c6-lwbrh" (UID: "1b523418-d938-4ba7-8788-b93b382429e3") : secret "webhook-server-cert" not found Nov 26 15:43:40 crc kubenswrapper[5010]: I1126 15:43:40.710314 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:40 crc kubenswrapper[5010]: I1126 15:43:40.719782 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/93625d2a-6f36-43a8-b26c-8f6506955b15-cert\") pod \"infra-operator-controller-manager-57548d458d-sxdct\" (UID: \"93625d2a-6f36-43a8-b26c-8f6506955b15\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:40 crc kubenswrapper[5010]: I1126 15:43:40.862215 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:43:40 crc kubenswrapper[5010]: I1126 15:43:40.917643 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:40 crc kubenswrapper[5010]: I1126 15:43:40.921781 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3daf5f1d-5d15-4b93-ac0b-8209060a0557-cert\") pod \"openstack-baremetal-operator-controller-manager-674cb676c8nb4vx\" (UID: \"3daf5f1d-5d15-4b93-ac0b-8209060a0557\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:40 crc kubenswrapper[5010]: I1126 15:43:40.990373 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:43:41 crc kubenswrapper[5010]: E1126 15:43:41.166804 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7" Nov 26 15:43:41 crc kubenswrapper[5010]: E1126 15:43:41.167013 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ironic-operator@sha256:d65dbfc956e9cf376f3c48fc3a0942cb7306b5164f898c40d1efca106df81db7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6x4vk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:43:41 crc kubenswrapper[5010]: I1126 15:43:41.526147 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:41 crc kubenswrapper[5010]: I1126 15:43:41.526236 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:41 crc kubenswrapper[5010]: I1126 15:43:41.530592 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-metrics-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:41 crc kubenswrapper[5010]: I1126 15:43:41.545967 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/1b523418-d938-4ba7-8788-b93b382429e3-webhook-certs\") pod \"openstack-operator-controller-manager-659d75f7c6-lwbrh\" (UID: \"1b523418-d938-4ba7-8788-b93b382429e3\") " pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:41 crc kubenswrapper[5010]: I1126 15:43:41.683936 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:41 crc kubenswrapper[5010]: E1126 15:43:41.911041 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:9413ed1bc2ae1a6bd28c59b1c7f7e91e1638de7b2a7d4729ed3fa2135182465d" Nov 26 15:43:41 crc kubenswrapper[5010]: E1126 15:43:41.911251 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:9413ed1bc2ae1a6bd28c59b1c7f7e91e1638de7b2a7d4729ed3fa2135182465d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-fbs2z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5d494799bf-mc96z_openstack-operators(8b2b09a7-2b17-43da-ae0e-4448b96eed50): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:43:42 crc kubenswrapper[5010]: E1126 15:43:42.657181 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2" Nov 26 15:43:42 crc kubenswrapper[5010]: E1126 15:43:42.657485 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:888edf6f432e52eaa5fc3caeae616fe38a3302b006bbba0e38885b2beba9f0f2,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rwj8q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-66f4dd4bc7-sj6tg_openstack-operators(dfb4a15b-a139-4778-acc7-f236e947ca96): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:43:44 crc kubenswrapper[5010]: E1126 15:43:44.218811 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6" Nov 26 15:43:44 crc kubenswrapper[5010]: E1126 15:43:44.219702 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/manila-operator@sha256:89910bc3ecceb7590d3207ac294eb7354de358cf39ef03c72323b26c598e50e6,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vrhkh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-5d499bf58b-k7vx2_openstack-operators(7e5769c2-7f83-41ff-9365-7f5792e8d81b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:43:44 crc kubenswrapper[5010]: E1126 15:43:44.409160 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385" Nov 26 15:43:44 crc kubenswrapper[5010]: E1126 15:43:44.409422 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:7d66757c0af67104f0389e851a7cc0daa44443ad202d157417bd86bbb57cc385,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rrz9c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-76cc84c6bb-zrldc_openstack-operators(01236c17-da54-4428-9e82-9a3b0165d6fc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:43:46 crc kubenswrapper[5010]: E1126 15:43:46.330972 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711" Nov 26 15:43:46 crc kubenswrapper[5010]: E1126 15:43:46.335877 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:25faa5b0e4801d4d3b01a28b877ed3188eee71f33ad66f3c2e86b7921758e711,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-z2gzh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7b4567c7cf-9lx7h_openstack-operators(ce1fedbc-31da-4c37-9731-34e79ab604f4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:43:50 crc kubenswrapper[5010]: E1126 15:43:50.021959 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Nov 26 15:43:50 crc kubenswrapper[5010]: E1126 15:43:50.022636 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-88tjm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-h9gnm_openstack-operators(cdfa6310-b994-49ba-8e89-dc6584a65314): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Nov 26 15:43:50 crc kubenswrapper[5010]: E1126 15:43:50.027835 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" podUID="cdfa6310-b994-49ba-8e89-dc6584a65314" Nov 26 15:43:50 crc kubenswrapper[5010]: E1126 15:43:50.880788 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" podUID="cdfa6310-b994-49ba-8e89-dc6584a65314" Nov 26 15:43:52 crc kubenswrapper[5010]: I1126 15:43:52.943661 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-sxdct"] Nov 26 15:43:53 crc kubenswrapper[5010]: I1126 15:43:53.249822 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh"] Nov 26 15:43:53 crc kubenswrapper[5010]: I1126 15:43:53.256991 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx"] Nov 26 15:43:54 crc kubenswrapper[5010]: W1126 15:43:54.478871 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b523418_d938_4ba7_8788_b93b382429e3.slice/crio-74be651d471222d907caee43a2023104bdf88953971521986b4e502fa6449e84 WatchSource:0}: Error finding container 74be651d471222d907caee43a2023104bdf88953971521986b4e502fa6449e84: Status 404 returned error can't find the container with id 74be651d471222d907caee43a2023104bdf88953971521986b4e502fa6449e84 Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.918295 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" event={"ID":"191eef94-8fdf-4180-8ce0-1d62fc3f0de0","Type":"ContainerStarted","Data":"c40624e5036b61f37a3a5151cb7712ccdabb095aae47988a25b62eedc1cc46ad"} Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.932427 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerStarted","Data":"74be651d471222d907caee43a2023104bdf88953971521986b4e502fa6449e84"} Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.934146 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" event={"ID":"3daf5f1d-5d15-4b93-ac0b-8209060a0557","Type":"ContainerStarted","Data":"d8192540711f7f47360bd52818dddc20cfd03760237d803dcdd67fda1341baff"} Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.939444 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" event={"ID":"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f","Type":"ContainerStarted","Data":"b1fd64c80ce43af3e354ed98cab94565fbbb3adb3f0d394ec74619425ffb7574"} Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.943174 5010 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerStarted","Data":"0563efef885cf35fbac9690b1f2ec35bf05f56678848cfe469d9453743b32b32"} Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.944530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" event={"ID":"a4bbf592-007c-4176-a6a3-0209b33b6048","Type":"ContainerStarted","Data":"935bf0f31303b4c8e4604b5384f55b475f2c052bd5e4a6ed50ddd61d0dffa779"} Nov 26 15:43:54 crc kubenswrapper[5010]: I1126 15:43:54.945594 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" event={"ID":"b0d7107e-a617-4a7b-a6e3-0267996965ef","Type":"ContainerStarted","Data":"ff4e2ed76a4e549e0d660455bf751ed522150bb92788d3f57daebe0960cc8b8f"} Nov 26 15:43:55 crc kubenswrapper[5010]: I1126 15:43:55.958967 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" event={"ID":"05194bfa-88c3-4826-8a59-6d62252e4b1a","Type":"ContainerStarted","Data":"b9db1037794704a0616860331855c58426ae1db11825a43d0dab8bc133a77603"} Nov 26 15:43:55 crc kubenswrapper[5010]: I1126 15:43:55.961723 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" event={"ID":"b6c13a13-621b-45cb-9830-4dfaf15ee06b","Type":"ContainerStarted","Data":"786138c069c0810c3e4d6dc76e3af66319ca430ec0c277a722fbf73353c60a65"} Nov 26 15:43:55 crc kubenswrapper[5010]: I1126 15:43:55.963041 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" event={"ID":"6a970d68-d885-4fc2-9d58-508537a42572","Type":"ContainerStarted","Data":"b50f73582c007c8f24a9c03ddd236263dc25499703ad832f45245a094947f0c0"} Nov 26 15:43:56 crc kubenswrapper[5010]: I1126 15:43:56.973335 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerStarted","Data":"c8ca7ec6ec2d89eb4768de0b045c04739ae5566a9cfcb185559fb696a96e70c5"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.026609 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" event={"ID":"1ff0a07f-935b-493a-a18a-a449232dc185","Type":"ContainerStarted","Data":"bd717705b7d6552c334ac05e9e52e2cf5bd8e97bc19a100a306812b3a42aa922"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.034229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" event={"ID":"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f","Type":"ContainerStarted","Data":"a4c5b081c4c74b33e83b78ece604ec7863a64412063e5a9f2652411b5f9cdf21"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.034957 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.038409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" 
event={"ID":"522c2ed1-a470-4885-88fc-395ed7834b23","Type":"ContainerStarted","Data":"d15d81ff00570f28f177a49da2a1e47a131b7d179fe2a0690555be742299479f"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.039864 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.040370 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" event={"ID":"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19","Type":"ContainerStarted","Data":"f9412429b8a67c0a190bbe69c6b86f7e42407ffc5cb422757ff73d8c857f60d2"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.042084 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerStarted","Data":"1e7f60f161902fc7ea8951bcf6c0ff8f1c782354efd64e3efaed636c46858e43"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.042760 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.043941 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" event={"ID":"82a45cae-9275-4f6a-8807-1ed1c97da89e","Type":"ContainerStarted","Data":"154aef2360f415a2ceded687e7fa0a7301fb8088de9bde3d7b7c6cf03b55c884"} Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.053843 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" podStartSLOduration=2.615613571 podStartE2EDuration="35.053824359s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.326453366 +0000 UTC m=+1027.117170504" lastFinishedPulling="2025-11-26 15:43:58.764664144 +0000 UTC m=+1059.555381292" observedRunningTime="2025-11-26 15:43:59.051353157 +0000 UTC m=+1059.842070325" watchObservedRunningTime="2025-11-26 15:43:59.053824359 +0000 UTC m=+1059.844541497" Nov 26 15:43:59 crc kubenswrapper[5010]: I1126 15:43:59.121563 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podStartSLOduration=34.12153106 podStartE2EDuration="34.12153106s" podCreationTimestamp="2025-11-26 15:43:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:43:59.112599626 +0000 UTC m=+1059.903316774" watchObservedRunningTime="2025-11-26 15:43:59.12153106 +0000 UTC m=+1059.912248208" Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.054431 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" event={"ID":"a4bbf592-007c-4176-a6a3-0209b33b6048","Type":"ContainerStarted","Data":"e37a7f29394a08304640d083657ca78f85820fa80be9266b517cc2ea57eab942"} Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.054958 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.059091 5010 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.060888 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" event={"ID":"522c2ed1-a470-4885-88fc-395ed7834b23","Type":"ContainerStarted","Data":"5bde20bfe6020ec87ac54470d875f7c6b2a609bfb74c33b6d3d97ccf8142a2ba"} Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.061325 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.100076 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" podStartSLOduration=2.553262088 podStartE2EDuration="35.100048664s" podCreationTimestamp="2025-11-26 15:43:25 +0000 UTC" firstStartedPulling="2025-11-26 15:43:27.18873016 +0000 UTC m=+1027.979447308" lastFinishedPulling="2025-11-26 15:43:59.735516736 +0000 UTC m=+1060.526233884" observedRunningTime="2025-11-26 15:44:00.092575946 +0000 UTC m=+1060.883293104" watchObservedRunningTime="2025-11-26 15:44:00.100048664 +0000 UTC m=+1060.890765812" Nov 26 15:44:00 crc kubenswrapper[5010]: I1126 15:44:00.101204 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" podStartSLOduration=2.629664915 podStartE2EDuration="36.101190483s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:25.996435346 +0000 UTC m=+1026.787152494" lastFinishedPulling="2025-11-26 15:43:59.467960924 +0000 UTC m=+1060.258678062" observedRunningTime="2025-11-26 15:44:00.076193705 +0000 UTC m=+1060.866910853" watchObservedRunningTime="2025-11-26 15:44:00.101190483 +0000 UTC m=+1060.891907631" Nov 26 15:44:00 crc kubenswrapper[5010]: E1126 15:44:00.599261 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 26 15:44:00 crc kubenswrapper[5010]: E1126 15:44:00.599489 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pzw4r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-57988cc5b5-zq8vc_openstack-operators(bf155072-f786-47eb-9455-f807444d12e9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:44:00 crc kubenswrapper[5010]: E1126 15:44:00.600940 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" podUID="bf155072-f786-47eb-9455-f807444d12e9" Nov 26 15:44:01 crc kubenswrapper[5010]: I1126 15:44:01.073028 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" event={"ID":"6a970d68-d885-4fc2-9d58-508537a42572","Type":"ContainerStarted","Data":"1ae278aa304d951cef8054c6af3c6e13ac13a0a5cc47d1d1b4c13c5d204d887d"} Nov 26 15:44:01 crc kubenswrapper[5010]: I1126 15:44:01.074934 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:44:01 crc kubenswrapper[5010]: I1126 15:44:01.077063 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 15:44:01 crc kubenswrapper[5010]: I1126 15:44:01.098042 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" podStartSLOduration=3.16720556 podStartE2EDuration="37.098021087s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.659646118 +0000 UTC m=+1027.450363266" lastFinishedPulling="2025-11-26 15:44:00.590461645 +0000 UTC m=+1061.381178793" observedRunningTime="2025-11-26 15:44:01.090678702 +0000 UTC m=+1061.881395860" watchObservedRunningTime="2025-11-26 15:44:01.098021087 +0000 UTC m=+1061.888738235" Nov 26 15:44:05 crc kubenswrapper[5010]: I1126 15:44:05.837354 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 15:44:09 crc kubenswrapper[5010]: I1126 15:44:09.144135 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" 
event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerStarted","Data":"fe461b19e16d20c2455c84e24a435bc410ffb68112aac3b95ec29676f730a5a5"} Nov 26 15:44:11 crc kubenswrapper[5010]: I1126 15:44:11.425074 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:44:11 crc kubenswrapper[5010]: I1126 15:44:11.425682 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:44:11 crc kubenswrapper[5010]: I1126 15:44:11.693742 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 15:44:14 crc kubenswrapper[5010]: E1126 15:44:14.320454 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:51a478c52d9012c08743f63b44a3721c7ff7a0599ba9c2cf89ad54ea41b19e41" Nov 26 15:44:14 crc kubenswrapper[5010]: E1126 15:44:14.321958 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:51a478c52d9012c08743f63b44a3721c7ff7a0599ba9c2cf89ad54ea41b19e41,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent@sha256:d5f5c71b6e6992182d66adbf5430bdc67ccbbea2893d41a0b690309c38d67469,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner@sha256:dac9e7b4f0cbdfc0ea387d33aa1c5406e468fc221f9c92eff2dd0071370c8747,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api@sha256:c8101c77a82eae4407e41e1fd766dfc6e1b7f9ed1679e3efb6f91ff97a1557b2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator@sha256:eb9743b21bbadca6f7cb9ac4fc46b5d58c51c674073c7e1121f4474a71304071,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener@sha256:3d81f839b98c2e2a5bf0da79f2f9a92dff7d0a3c5a830b0e95c89dad8cf98a6a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier@sha256:d19ac99249b47dd8ea16cd6aaa5756346aa8a2f119ee50819c15c5366efb417d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24@sha256:8536169e5537fe6c330eba814248abdcf39cdd8f7e7336034d74e6fda9544050,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener@sha256:4f1fa337760e82bfd67cdd142a97c121146dd7e621daac161940dd5e4ddb80dc,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker@sha256:3613b345d5baed98effd906f8b0242d863e14c97078ea473ef01fe1b0afc46f3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute@sha256:9f9f367ed4c85efb16c3a74a4bb707ff0db271d7bc5abc70a71e984b55f43003,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi@sha256:b73ad22b4955b06d584bce81742556d8c0c7828c495494f8ea7c99391c61b70f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter@sha256:7211a617ec657701ca819aa0ba28e1d5750f5bf2c1391b755cc4a48cc360b0fa,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification@sha256:aa1d3aaf6b394621ed4089a98e0a82b763f467e8b5c5db772f9fdf99fc86e333,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s
-operators/sg-core@sha256:09b5017c95d7697e66b9c64846bc48ef5826a009cba89b956ec54561e5f4a2d1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup@sha256:d6661053141b6df421288a7c9968a155ab82e478c1d75ab41f2cebe2f0ca02d2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler@sha256:ce2d63258cb4e7d0d1c07234de6889c5434464190906798019311a1c7cf6387f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume@sha256:0485ef9e5b4437f7cd2ba54034a87722ce4669ee86b3773c6b0c037ed8000e91,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api@sha256:f3dcdb3eccaf5bc69f97de24225f071e251992f4595c931ed37e8a5d9ada8b21,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor@sha256:43b7d46663cd88410bb01870767f2c561e8c4bafebc1ae283c0aa59fecfaeb16,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api@sha256:ff0c553ceeb2e0f44b010e37dc6d0db8a251797b88e56468b7cf7f05253e4232,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9@sha256:624f553f073af7493d34828b074adc9981cce403edd8e71482c7307008479fd9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central@sha256:e3874936a518c8560339db8f840fc5461885819f6050b5de8d3ab9199bea5094,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns@sha256:1cea25f1d2a45affc80c46fb9d427749d3f06b61590ac6070a2910e3ec8a4e5d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer@sha256:e36d5b9a65194f12f7b01c6422ba3ed52a687fd1695fbb21f4986c67d9f9317f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound@sha256:8b21bec527d54cd766e277889df6bcccd2baeaa946274606b986c0c3b7ca689f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker@sha256:45aceca77f8fcf61127f0da650bdfdf11ede9b0944c78b63fab819d03283f96b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr@sha256:709ac58998927dd61786821ae1e63343fd97ccf5763aac5edb4583eea9401d22,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid@sha256:867d4ef7c21f75e6030a685b5762ab4d84b671316ed6b98d75200076e93342cd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler@sha256:581b65b646301e0fcb07582150ba63438f1353a85bf9acf1eb2acb4ce71c58bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL
_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron@sha256:2b90da93550b99d2fcfa95bd819f3363aa68346a416f8dc7baac3e9c5f487761,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd@sha256:6f86db36d668348be8c5b46dcda8b1fa23d34bfdc07164fbcbe7a6327fb4de24,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent@sha256:8cde52cef8795d1c91983b100d86541c7718160ec260fe0f97b96add4c2c8ee8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:a9583cb3baf440d2358ef041373833afbeae60da8159dd031502379901141620,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent@sha256:835ebed082fe1c45bd799d1d5357595ce63efeb05ca876f26b08443facb9c164,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent@sha256:011d682241db724bc40736c9b54d2ea450ea7e6be095b1ff5fa28c8007466775,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent@sha256:2025da90cff8f563deb08bee71efe16d4078edc2a767b2e225cca5c77f1aa2f9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api@sha256:ff46cd5e0e13d105c4629e78c2734a50835f06b6a1e31da9e0462981d10c4be3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn@sha256:5b4fd0c2b76fa5539f74687b11c5882d77bd31352452322b37ff51fa18f12a61,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine@sha256:5e03376bd895346dc8f627ca15ded942526ed8b5e92872f453ce272e694d18d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon@sha256:65b94ff9fcd486845fb0544583bf2a973246a61a0ad32340fb92d632285f1057,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached@sha256:36a0fb31978aee0ded2483de311631e64a644d0b0685b5b055f65ede7eb8e8a2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis@sha256:5f6045841aff0fde6f684a34cdf49f8dc7b2c3bcbdeab201f1058971e0c5f79e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api@sha256:448f4e1b740c30936e340bd6e8534d78c83357bf373a4223950aa64d3484f007,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE
_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor@sha256:b68e3615af8a0eb0ef6bf9ceeef59540a6f4a9a85f6078a3620be115c73a7db8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector@sha256:7eae01cf60383e523c9cd94d158a9162120a7370829a1dad20fdea6b0fd660bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent@sha256:28cc10501788081eb61b5a1af35546191a92741f4f109df54c74e2b19439d0f9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe@sha256:9a616e37acfd120612f78043237a8541266ba34883833c9beb43f3da313661ad,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent@sha256:6b1be6cd94a0942259bca5d5d2c30cc7de4a33276b61f8ae3940226772106256,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone@sha256:02d2c22d15401574941fbe057095442dee0d6f7a0a9341de35d25e6a12a3fe4b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api@sha256:fc3b3a36b74fd653946723c54b208072d52200635850b531e9d595a7aaea5a01,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler@sha256:7850ccbff320bf9a1c9c769c1c70777eb97117dd8cd5ae4435be9b4622cf807a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share@sha256:397dac7e39cf40d14a986e6ec4a60fb698ca35c197d0db315b1318514cc6d1d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils@sha256:1c95142a36276686e720f86423ee171dc9adcc1e89879f627545b7c906ccd9bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api@sha256:e331a8fde6638e5ba154c4f0b38772a9a424f60656f2777245975fb1fa02f07d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute@sha256:b6e1e8a249d36ef36c6ac4170af1e043dda1ccc0f9672832d3ff151bf3533076,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor@sha256:cd3cf7a34053e850b4d4f9f4ea4c74953a54a42fd18e47d7c01d44a88923e925,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy@sha256:aee28476344fc0cc148fbe97daf9b1bfcedc22001550bba4bdc4e84be7b6989d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:q
uay.io/podified-antelope-centos9/openstack-nova-scheduler@sha256:cfa0b92c976603ee2a937d34013a238fcd8aa75f998e50642e33489f14124633,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api@sha256:73c2f2d6eecf88acf4e45b133c8373d9bb006b530e0aff0b28f3b7420620a874,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager@sha256:927b405cc04abe5ff716186e8d35e2dc5fad1c8430194659ee6617d74e4e055d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping@sha256:6154d7cebd7c339afa5b86330262156171743aa5b79c2b78f9a2f378005ed8fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog@sha256:e2db2f4af8d3d0be7868c6efef0189f3a2c74a8f96ae10e3f991cdf83feaef29,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker@sha256:c773629df257726a6d3cacc24a6e4df0babcd7d37df04e6d14676a8da028b9c9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:776211111e2e6493706dbc49a3ba44f31d1b947919313ed3a0f35810e304ec52,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather@sha256:ae4a20d9aad04cfaeaa3105fa8e37db4216c3b17530bc98daf1204555bc23485,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:ecd56e6733c475f2d441344fd98f288c3eac0261ba113695fec7520a954ccbc7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi@sha256:7cccf24ad0a152f90ca39893064f48a1656950ee8142685a5d482c71f0bdc9f5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:af46761060c7987e1dee5f14c06d85b46f12ad8e09c83d4246ab4e3a65dfda3e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base@sha256:05450b48f6b5352b2686a26e933e8727748edae2ae9652d9164b7d7a1817c55a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server@sha256:fc9c99eeef91523482bd8f92661b393287e1f2a24ad2ba9e33191f8de9af74cf,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server@sha256:2346037e064861c7892690d2e8b3e1eea1a26ce3c3a11fda0b41301965bc828c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,ValueFrom:nil,},EnvVar{Name:RELATED
_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account@sha256:c26c3ff9cabe3593ceb10006e782bf9391ac14785768ce9eec4f938c2d3cf228,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object@sha256:daa45220bb1c47922d0917aa8fe423bb82b03a01429f1c9e37635e701e352d71,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server@sha256:a80a074e227d3238bb6f285788a9e886ae7a5909ccbc5c19c93c369bdfe5b3b8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all@sha256:58ac66ca1be01fe0157977bd79a26cde4d0de153edfaf4162367c924826b2ef4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api@sha256:5f22dc1a974f5b99fef0655d0e3d01389bcc632880ac921a65fec25652e74664,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier@sha256:84958d1a17837198f9d91ac32c5d7be95d041a0228fe01d5c626c855241da8c9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine@sha256:8716afabd6004269722b3133937e846a87a8420941a3469852c031176aed5848,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6zrc5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed 
in pod openstack-baremetal-operator-controller-manager-674cb676c8nb4vx_openstack-operators(3daf5f1d-5d15-4b93-ac0b-8209060a0557): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:44:14 crc kubenswrapper[5010]: E1126 15:44:14.576451 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0" Nov 26 15:44:14 crc kubenswrapper[5010]: E1126 15:44:14.576913 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6x4vk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:44:14 crc kubenswrapper[5010]: E1126 15:44:14.578206 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"]" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 15:44:18 crc kubenswrapper[5010]: I1126 15:44:18.232932 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" event={"ID":"82a45cae-9275-4f6a-8807-1ed1c97da89e","Type":"ContainerStarted","Data":"99aa585c88147d0636924708fa3b4ea460ffecbed9d600acc3823490d4c7397b"} Nov 26 15:44:18 crc kubenswrapper[5010]: I1126 15:44:18.233425 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 15:44:18 crc kubenswrapper[5010]: I1126 15:44:18.237980 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" 
Nov 26 15:44:18 crc kubenswrapper[5010]: I1126 15:44:18.272445 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" podStartSLOduration=19.644103545 podStartE2EDuration="54.272423127s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:27.173961919 +0000 UTC m=+1027.964679067" lastFinishedPulling="2025-11-26 15:44:01.802281511 +0000 UTC m=+1062.592998649" observedRunningTime="2025-11-26 15:44:18.270095999 +0000 UTC m=+1079.060813167" watchObservedRunningTime="2025-11-26 15:44:18.272423127 +0000 UTC m=+1079.063140275" Nov 26 15:44:19 crc kubenswrapper[5010]: E1126 15:44:19.567085 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" podUID="7e5769c2-7f83-41ff-9365-7f5792e8d81b" Nov 26 15:44:19 crc kubenswrapper[5010]: E1126 15:44:19.627233 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" podUID="8b2b09a7-2b17-43da-ae0e-4448b96eed50" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.260878 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" event={"ID":"ce1fedbc-31da-4c37-9731-34e79ab604f4","Type":"ContainerStarted","Data":"d258eb40e74bc7ae2e7b5884162a2dae4f827ead90f2e50a13a2c740b1eb24f8"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.264200 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerStarted","Data":"9b37bf37953fb3749887f3053a64d98f5ca0261feec469e231881a37bf2df852"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.267588 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" event={"ID":"8b2b09a7-2b17-43da-ae0e-4448b96eed50","Type":"ContainerStarted","Data":"b81a725400e18225b1b2c0e693b3be355b7bd571eca7b1b1e62eddb4709985fe"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.270871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" event={"ID":"bf155072-f786-47eb-9455-f807444d12e9","Type":"ContainerStarted","Data":"a3a2e91975b446dcf94c7de1d04c03b010215ecaae718f9c1f1253fb5381deac"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.275689 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" event={"ID":"3daf5f1d-5d15-4b93-ac0b-8209060a0557","Type":"ContainerStarted","Data":"f25d51cc461f144d977c02047f79730d1352e00c07a8b0e78bec7362525e4c75"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.279383 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" event={"ID":"7e5769c2-7f83-41ff-9365-7f5792e8d81b","Type":"ContainerStarted","Data":"34a7d15e6ed8d867ba4adcb2db3df80a0c8b666e899a6d5faf3d8c591c5d9cd3"} Nov 26 15:44:20 crc 
kubenswrapper[5010]: I1126 15:44:20.298152 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerStarted","Data":"13dde15958e6ef4f4d6dcd63ee8fe488d049ece503f7e32d307474b9190ad1f6"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.300603 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerStarted","Data":"123aa5532e736c7d79300e4d0a083c38cf139288714201d7f45153f59a0a9a2d"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.321133 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" event={"ID":"191eef94-8fdf-4180-8ce0-1d62fc3f0de0","Type":"ContainerStarted","Data":"3cc891a04a5caa5bb149d3c23e03f41dbc296e54d386eb81d297d9f49c29b6f2"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.327927 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" event={"ID":"cdfa6310-b994-49ba-8e89-dc6584a65314","Type":"ContainerStarted","Data":"8606ed869ed200cba20c879dfa9b7f2f431c3538d3ed1f328dc53dba3bff0cdc"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.339352 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" event={"ID":"b6c13a13-621b-45cb-9830-4dfaf15ee06b","Type":"ContainerStarted","Data":"59bc11e5dbf8d2067a182af957e8d406195052587403c1a37477fb586d9c1e8e"} Nov 26 15:44:20 crc kubenswrapper[5010]: E1126 15:44:20.339757 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" podUID="ce1fedbc-31da-4c37-9731-34e79ab604f4" Nov 26 15:44:20 crc kubenswrapper[5010]: E1126 15:44:20.339870 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" podUID="01236c17-da54-4428-9e82-9a3b0165d6fc" Nov 26 15:44:20 crc kubenswrapper[5010]: E1126 15:44:20.340326 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" podUID="3daf5f1d-5d15-4b93-ac0b-8209060a0557" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.360599 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" event={"ID":"1ff0a07f-935b-493a-a18a-a449232dc185","Type":"ContainerStarted","Data":"0d300fd926402ffd3c75d086ce69ae7bb8f6d045bed1a986cc6733f4510bc8a0"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.361825 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.372221 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.374385 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" podStartSLOduration=4.503659759 podStartE2EDuration="55.374366475s" podCreationTimestamp="2025-11-26 15:43:25 +0000 UTC" firstStartedPulling="2025-11-26 15:43:27.173483637 +0000 UTC m=+1027.964200775" lastFinishedPulling="2025-11-26 15:44:18.044190343 +0000 UTC m=+1078.834907491" observedRunningTime="2025-11-26 15:44:20.368062717 +0000 UTC m=+1081.158779865" watchObservedRunningTime="2025-11-26 15:44:20.374366475 +0000 UTC m=+1081.165083623" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.378391 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" event={"ID":"05194bfa-88c3-4826-8a59-6d62252e4b1a","Type":"ContainerStarted","Data":"8ea8a2d3a40d5a37671ebf7486c912b840057e3aca0ab954bb1ae49e6a117c10"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.379466 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.385193 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" event={"ID":"b0d7107e-a617-4a7b-a6e3-0267996965ef","Type":"ContainerStarted","Data":"fa6fb85944e37ae3e6eb1efbfdda5b303dad3a06ed5906d9e39a2b5209bf9bba"} Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.385240 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.392029 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.392210 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.432649 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" podStartSLOduration=5.071199604 podStartE2EDuration="56.432628159s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.763455166 +0000 UTC m=+1027.554172304" lastFinishedPulling="2025-11-26 15:44:18.124883711 +0000 UTC m=+1078.915600859" observedRunningTime="2025-11-26 15:44:20.432350252 +0000 UTC m=+1081.223067400" watchObservedRunningTime="2025-11-26 15:44:20.432628159 +0000 UTC m=+1081.223345317" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.434217 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" podStartSLOduration=4.8033259170000004 podStartE2EDuration="55.434209969s" podCreationTimestamp="2025-11-26 15:43:25 +0000 UTC" firstStartedPulling="2025-11-26 15:43:27.188945575 +0000 UTC m=+1027.979662713" lastFinishedPulling="2025-11-26 15:44:17.819829607 +0000 UTC m=+1078.610546765" observedRunningTime="2025-11-26 15:44:20.398176953 +0000 UTC m=+1081.188894101" 
watchObservedRunningTime="2025-11-26 15:44:20.434209969 +0000 UTC m=+1081.224927117" Nov 26 15:44:20 crc kubenswrapper[5010]: E1126 15:44:20.452692 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" podUID="dfb4a15b-a139-4778-acc7-f236e947ca96" Nov 26 15:44:20 crc kubenswrapper[5010]: I1126 15:44:20.479185 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" podStartSLOduration=5.301063869 podStartE2EDuration="56.479161628s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.76362424 +0000 UTC m=+1027.554341388" lastFinishedPulling="2025-11-26 15:44:17.941721999 +0000 UTC m=+1078.732439147" observedRunningTime="2025-11-26 15:44:20.477570738 +0000 UTC m=+1081.268287886" watchObservedRunningTime="2025-11-26 15:44:20.479161628 +0000 UTC m=+1081.269878776" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.393322 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" event={"ID":"bf155072-f786-47eb-9455-f807444d12e9","Type":"ContainerStarted","Data":"0d2525390ed0e538b9c2159af405eb75d5ea179696a8e6d8fbfcc2770ce5b380"} Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.393762 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.395934 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" event={"ID":"01236c17-da54-4428-9e82-9a3b0165d6fc","Type":"ContainerStarted","Data":"6f30156a48320a3d5f6ee6625249a9a21fd6dda0ccf016a3fbe63b873dfb4696"} Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.398088 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" event={"ID":"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19","Type":"ContainerStarted","Data":"5a5295a08f45c5458106eb856f013f13a1d85e9fb68f51ccad65b2081ce8d360"} Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.400369 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerStarted","Data":"54bcd59c9651d32e6addbfe8a9fcc458d8a059c4d8705635101ab55471d894ba"} Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.400984 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.402719 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" event={"ID":"dfb4a15b-a139-4778-acc7-f236e947ca96","Type":"ContainerStarted","Data":"17f5c6a4d1c4ff571eddf52cbb85c0e21463cc701c02a5e0fa5e64b3cfa52ded"} Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.417113 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" podStartSLOduration=5.600570043 
podStartE2EDuration="57.417092862s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:27.192261159 +0000 UTC m=+1027.982978307" lastFinishedPulling="2025-11-26 15:44:19.008783978 +0000 UTC m=+1079.799501126" observedRunningTime="2025-11-26 15:44:21.414906087 +0000 UTC m=+1082.205623245" watchObservedRunningTime="2025-11-26 15:44:21.417092862 +0000 UTC m=+1082.207810000" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.435448 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" podStartSLOduration=5.183687231 podStartE2EDuration="57.435424513s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.766358879 +0000 UTC m=+1027.557076047" lastFinishedPulling="2025-11-26 15:44:19.018096181 +0000 UTC m=+1079.808813329" observedRunningTime="2025-11-26 15:44:21.433494864 +0000 UTC m=+1082.224212132" watchObservedRunningTime="2025-11-26 15:44:21.435424513 +0000 UTC m=+1082.226141661" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.491722 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podStartSLOduration=6.670500754 podStartE2EDuration="57.491689446s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.776918954 +0000 UTC m=+1027.567636102" lastFinishedPulling="2025-11-26 15:44:17.598107636 +0000 UTC m=+1078.388824794" observedRunningTime="2025-11-26 15:44:21.485495051 +0000 UTC m=+1082.276212229" watchObservedRunningTime="2025-11-26 15:44:21.491689446 +0000 UTC m=+1082.282406624" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.506708 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podStartSLOduration=5.169259227 podStartE2EDuration="57.506683373s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.672012088 +0000 UTC m=+1027.462729236" lastFinishedPulling="2025-11-26 15:44:19.009436234 +0000 UTC m=+1079.800153382" observedRunningTime="2025-11-26 15:44:21.502049327 +0000 UTC m=+1082.292766565" watchObservedRunningTime="2025-11-26 15:44:21.506683373 +0000 UTC m=+1082.297400521" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.523663 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" podStartSLOduration=5.99407254 podStartE2EDuration="57.523637539s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.593933727 +0000 UTC m=+1027.384650875" lastFinishedPulling="2025-11-26 15:44:18.123498686 +0000 UTC m=+1078.914215874" observedRunningTime="2025-11-26 15:44:21.52329532 +0000 UTC m=+1082.314012468" watchObservedRunningTime="2025-11-26 15:44:21.523637539 +0000 UTC m=+1082.314354687" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.566647 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" podStartSLOduration=6.195581483 podStartE2EDuration="57.566608949s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.75248542 +0000 UTC m=+1027.543202568" lastFinishedPulling="2025-11-26 15:44:18.123512886 +0000 UTC m=+1078.914230034" 
observedRunningTime="2025-11-26 15:44:21.559129121 +0000 UTC m=+1082.349846279" watchObservedRunningTime="2025-11-26 15:44:21.566608949 +0000 UTC m=+1082.357326127" Nov 26 15:44:21 crc kubenswrapper[5010]: I1126 15:44:21.579671 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" podStartSLOduration=50.065936545 podStartE2EDuration="57.579652576s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:54.28697022 +0000 UTC m=+1055.077687388" lastFinishedPulling="2025-11-26 15:44:01.800686271 +0000 UTC m=+1062.591403419" observedRunningTime="2025-11-26 15:44:21.574254451 +0000 UTC m=+1082.364971599" watchObservedRunningTime="2025-11-26 15:44:21.579652576 +0000 UTC m=+1082.370369744" Nov 26 15:44:21 crc kubenswrapper[5010]: E1126 15:44:21.638232 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:51a478c52d9012c08743f63b44a3721c7ff7a0599ba9c2cf89ad54ea41b19e41\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" podUID="3daf5f1d-5d15-4b93-ac0b-8209060a0557" Nov 26 15:44:22 crc kubenswrapper[5010]: I1126 15:44:22.409911 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:44:22 crc kubenswrapper[5010]: I1126 15:44:22.409963 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:44:22 crc kubenswrapper[5010]: I1126 15:44:22.411512 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 15:44:22 crc kubenswrapper[5010]: I1126 15:44:22.412376 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 15:44:23 crc kubenswrapper[5010]: I1126 15:44:23.420408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" event={"ID":"7e5769c2-7f83-41ff-9365-7f5792e8d81b","Type":"ContainerStarted","Data":"636e667023d9a8bcd40bba7de400f2d406d56a4aa3de8ff2936814e20e6075f4"} Nov 26 15:44:23 crc kubenswrapper[5010]: I1126 15:44:23.421178 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:44:23 crc kubenswrapper[5010]: I1126 15:44:23.422788 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" event={"ID":"8b2b09a7-2b17-43da-ae0e-4448b96eed50","Type":"ContainerStarted","Data":"48ecdfd6ba5fa7d524fddb033a37e3916c9c2634cce289ddf1aaa3ae4e1f07b1"} Nov 26 15:44:23 crc kubenswrapper[5010]: I1126 15:44:23.445622 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" podStartSLOduration=4.068568544 podStartE2EDuration="59.445601315s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.758102091 +0000 UTC m=+1027.548819239" lastFinishedPulling="2025-11-26 15:44:22.135134822 +0000 UTC 
m=+1082.925852010" observedRunningTime="2025-11-26 15:44:23.440109347 +0000 UTC m=+1084.230826525" watchObservedRunningTime="2025-11-26 15:44:23.445601315 +0000 UTC m=+1084.236318463" Nov 26 15:44:23 crc kubenswrapper[5010]: I1126 15:44:23.461570 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" podStartSLOduration=4.093426079 podStartE2EDuration="59.461551416s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.76603053 +0000 UTC m=+1027.556747678" lastFinishedPulling="2025-11-26 15:44:22.134155867 +0000 UTC m=+1082.924873015" observedRunningTime="2025-11-26 15:44:23.461291359 +0000 UTC m=+1084.252008527" watchObservedRunningTime="2025-11-26 15:44:23.461551416 +0000 UTC m=+1084.252268554" Nov 26 15:44:24 crc kubenswrapper[5010]: I1126 15:44:24.430357 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:44:24 crc kubenswrapper[5010]: I1126 15:44:24.961058 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:44:24 crc kubenswrapper[5010]: I1126 15:44:24.965963 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.119499 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.439993 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" event={"ID":"dfb4a15b-a139-4778-acc7-f236e947ca96","Type":"ContainerStarted","Data":"8b37edfe16175ce928894b2392eb797fe30712ce76387cbaedf24af7351940a9"} Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.440139 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.442219 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" event={"ID":"01236c17-da54-4428-9e82-9a3b0165d6fc","Type":"ContainerStarted","Data":"cae36229a20f3bd28ea7ae47bbaaa2ba414bf41fbcef640374ee743e2bf409c6"} Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.442555 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.444206 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" event={"ID":"ce1fedbc-31da-4c37-9731-34e79ab604f4","Type":"ContainerStarted","Data":"9453bb43265bec2f03c27e2b772be454e0779f6488bb7baf96ce9657dbfeea03"} Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.445566 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.607919 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" podStartSLOduration=4.098255481 podStartE2EDuration="1m1.60788641s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.660021117 +0000 UTC m=+1027.450738265" lastFinishedPulling="2025-11-26 15:44:24.169652046 +0000 UTC m=+1084.960369194" observedRunningTime="2025-11-26 15:44:25.606280399 +0000 UTC m=+1086.396997587" watchObservedRunningTime="2025-11-26 15:44:25.60788641 +0000 UTC m=+1086.398603618" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.637319 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" podStartSLOduration=4.081937261 podStartE2EDuration="1m1.637285068s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:26.615684683 +0000 UTC m=+1027.406401831" lastFinishedPulling="2025-11-26 15:44:24.17103247 +0000 UTC m=+1084.961749638" observedRunningTime="2025-11-26 15:44:25.636384916 +0000 UTC m=+1086.427102084" watchObservedRunningTime="2025-11-26 15:44:25.637285068 +0000 UTC m=+1086.428002256" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.659653 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" podStartSLOduration=4.218442234 podStartE2EDuration="1m0.65962706s" podCreationTimestamp="2025-11-26 15:43:25 +0000 UTC" firstStartedPulling="2025-11-26 15:43:27.173403555 +0000 UTC m=+1027.964120703" lastFinishedPulling="2025-11-26 15:44:23.614588371 +0000 UTC m=+1084.405305529" observedRunningTime="2025-11-26 15:44:25.654942792 +0000 UTC m=+1086.445659940" watchObservedRunningTime="2025-11-26 15:44:25.65962706 +0000 UTC m=+1086.450344208" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.678494 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.681516 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 15:44:25 crc kubenswrapper[5010]: I1126 15:44:25.750650 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 15:44:30 crc kubenswrapper[5010]: I1126 15:44:30.863525 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:44:30 crc kubenswrapper[5010]: I1126 15:44:30.870962 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 15:44:35 crc kubenswrapper[5010]: I1126 15:44:35.117358 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 15:44:35 crc kubenswrapper[5010]: I1126 15:44:35.131971 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 15:44:35 crc kubenswrapper[5010]: I1126 15:44:35.229656 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 15:44:35 crc kubenswrapper[5010]: I1126 15:44:35.493020 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 15:44:35 crc kubenswrapper[5010]: I1126 15:44:35.779003 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 15:44:38 crc kubenswrapper[5010]: I1126 15:44:38.581905 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" event={"ID":"3daf5f1d-5d15-4b93-ac0b-8209060a0557","Type":"ContainerStarted","Data":"426e6bef153a7bb9774ad4371f1c7a77967e38df0b192c5dc24524a981ff0b37"} Nov 26 15:44:38 crc kubenswrapper[5010]: I1126 15:44:38.582974 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:44:38 crc kubenswrapper[5010]: I1126 15:44:38.624446 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" podStartSLOduration=31.364402228 podStartE2EDuration="1m14.624416406s" podCreationTimestamp="2025-11-26 15:43:24 +0000 UTC" firstStartedPulling="2025-11-26 15:43:54.285864572 +0000 UTC m=+1055.076581720" lastFinishedPulling="2025-11-26 15:44:37.54587876 +0000 UTC m=+1098.336595898" observedRunningTime="2025-11-26 15:44:38.622378475 +0000 UTC m=+1099.413095643" watchObservedRunningTime="2025-11-26 15:44:38.624416406 +0000 UTC m=+1099.415133594" Nov 26 15:44:41 crc kubenswrapper[5010]: I1126 15:44:41.422659 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:44:41 crc kubenswrapper[5010]: I1126 15:44:41.423236 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:44:50 crc kubenswrapper[5010]: I1126 15:44:50.998103 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.149980 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl"] Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.152317 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.155124 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.155502 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.175385 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzkzq\" (UniqueName: \"kubernetes.io/projected/9f957d9d-bfaf-449a-be3a-a20de204b99b-kube-api-access-tzkzq\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.175823 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f957d9d-bfaf-449a-be3a-a20de204b99b-config-volume\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.176012 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f957d9d-bfaf-449a-be3a-a20de204b99b-secret-volume\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.178260 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl"] Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.277187 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f957d9d-bfaf-449a-be3a-a20de204b99b-secret-volume\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.277281 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzkzq\" (UniqueName: \"kubernetes.io/projected/9f957d9d-bfaf-449a-be3a-a20de204b99b-kube-api-access-tzkzq\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.277330 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f957d9d-bfaf-449a-be3a-a20de204b99b-config-volume\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.278625 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f957d9d-bfaf-449a-be3a-a20de204b99b-config-volume\") pod 
\"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.287423 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f957d9d-bfaf-449a-be3a-a20de204b99b-secret-volume\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.297314 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzkzq\" (UniqueName: \"kubernetes.io/projected/9f957d9d-bfaf-449a-be3a-a20de204b99b-kube-api-access-tzkzq\") pod \"collect-profiles-29402865-gzsvl\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.476238 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:00 crc kubenswrapper[5010]: I1126 15:45:00.912007 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl"] Nov 26 15:45:01 crc kubenswrapper[5010]: I1126 15:45:01.833998 5010 generic.go:334] "Generic (PLEG): container finished" podID="9f957d9d-bfaf-449a-be3a-a20de204b99b" containerID="63ab4d5dd60774b6439a77917b8e090bf455e9c84964e03e121136838bfd668e" exitCode=0 Nov 26 15:45:01 crc kubenswrapper[5010]: I1126 15:45:01.834098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" event={"ID":"9f957d9d-bfaf-449a-be3a-a20de204b99b","Type":"ContainerDied","Data":"63ab4d5dd60774b6439a77917b8e090bf455e9c84964e03e121136838bfd668e"} Nov 26 15:45:01 crc kubenswrapper[5010]: I1126 15:45:01.834532 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" event={"ID":"9f957d9d-bfaf-449a-be3a-a20de204b99b","Type":"ContainerStarted","Data":"7c7965512632c45e76c3f16691b1ef6f7c867b78cd814acc8959c478967f5f41"} Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.182473 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.220278 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f957d9d-bfaf-449a-be3a-a20de204b99b-secret-volume\") pod \"9f957d9d-bfaf-449a-be3a-a20de204b99b\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.220447 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f957d9d-bfaf-449a-be3a-a20de204b99b-config-volume\") pod \"9f957d9d-bfaf-449a-be3a-a20de204b99b\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.220585 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzkzq\" (UniqueName: \"kubernetes.io/projected/9f957d9d-bfaf-449a-be3a-a20de204b99b-kube-api-access-tzkzq\") pod \"9f957d9d-bfaf-449a-be3a-a20de204b99b\" (UID: \"9f957d9d-bfaf-449a-be3a-a20de204b99b\") " Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.221610 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f957d9d-bfaf-449a-be3a-a20de204b99b-config-volume" (OuterVolumeSpecName: "config-volume") pod "9f957d9d-bfaf-449a-be3a-a20de204b99b" (UID: "9f957d9d-bfaf-449a-be3a-a20de204b99b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.229046 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f957d9d-bfaf-449a-be3a-a20de204b99b-kube-api-access-tzkzq" (OuterVolumeSpecName: "kube-api-access-tzkzq") pod "9f957d9d-bfaf-449a-be3a-a20de204b99b" (UID: "9f957d9d-bfaf-449a-be3a-a20de204b99b"). InnerVolumeSpecName "kube-api-access-tzkzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.235214 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f957d9d-bfaf-449a-be3a-a20de204b99b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "9f957d9d-bfaf-449a-be3a-a20de204b99b" (UID: "9f957d9d-bfaf-449a-be3a-a20de204b99b"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.321511 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9f957d9d-bfaf-449a-be3a-a20de204b99b-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.321566 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzkzq\" (UniqueName: \"kubernetes.io/projected/9f957d9d-bfaf-449a-be3a-a20de204b99b-kube-api-access-tzkzq\") on node \"crc\" DevicePath \"\"" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.321580 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/9f957d9d-bfaf-449a-be3a-a20de204b99b-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.851989 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" event={"ID":"9f957d9d-bfaf-449a-be3a-a20de204b99b","Type":"ContainerDied","Data":"7c7965512632c45e76c3f16691b1ef6f7c867b78cd814acc8959c478967f5f41"} Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.852052 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c7965512632c45e76c3f16691b1ef6f7c867b78cd814acc8959c478967f5f41" Nov 26 15:45:03 crc kubenswrapper[5010]: I1126 15:45:03.852078 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.862891 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-vnj6v"] Nov 26 15:45:06 crc kubenswrapper[5010]: E1126 15:45:06.864219 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f957d9d-bfaf-449a-be3a-a20de204b99b" containerName="collect-profiles" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.864237 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f957d9d-bfaf-449a-be3a-a20de204b99b" containerName="collect-profiles" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.864445 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f957d9d-bfaf-449a-be3a-a20de204b99b" containerName="collect-profiles" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.868528 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.873867 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.876272 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.876598 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.877427 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-vsxp6" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.878142 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v25g8\" (UniqueName: \"kubernetes.io/projected/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-kube-api-access-v25g8\") pod \"dnsmasq-dns-7bdd77c89-vnj6v\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.878187 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-config\") pod \"dnsmasq-dns-7bdd77c89-vnj6v\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.898556 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-vnj6v"] Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.960812 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6584b49599-t6ftg"] Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.962613 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.965264 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.979370 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-config\") pod \"dnsmasq-dns-7bdd77c89-vnj6v\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.979494 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v25g8\" (UniqueName: \"kubernetes.io/projected/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-kube-api-access-v25g8\") pod \"dnsmasq-dns-7bdd77c89-vnj6v\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.980603 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-config\") pod \"dnsmasq-dns-7bdd77c89-vnj6v\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:06 crc kubenswrapper[5010]: I1126 15:45:06.983110 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-t6ftg"] Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.020594 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v25g8\" (UniqueName: \"kubernetes.io/projected/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-kube-api-access-v25g8\") pod \"dnsmasq-dns-7bdd77c89-vnj6v\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.081907 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-dns-svc\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.082409 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-config\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.082510 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klsxp\" (UniqueName: \"kubernetes.io/projected/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-kube-api-access-klsxp\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.183480 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-dns-svc\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 
15:45:07.183635 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-config\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.183662 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klsxp\" (UniqueName: \"kubernetes.io/projected/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-kube-api-access-klsxp\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.184471 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-dns-svc\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.184478 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-config\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.199167 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.204034 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klsxp\" (UniqueName: \"kubernetes.io/projected/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-kube-api-access-klsxp\") pod \"dnsmasq-dns-6584b49599-t6ftg\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.277639 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.705237 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-vnj6v"] Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.843770 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-t6ftg"] Nov 26 15:45:07 crc kubenswrapper[5010]: W1126 15:45:07.858290 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8b6e4803_3d06_4677_a0c5_c45c4bf70ffd.slice/crio-97fa90a1fd292f902f3971c064afce524a12caeb19c7c01919f20b3d0dd9486f WatchSource:0}: Error finding container 97fa90a1fd292f902f3971c064afce524a12caeb19c7c01919f20b3d0dd9486f: Status 404 returned error can't find the container with id 97fa90a1fd292f902f3971c064afce524a12caeb19c7c01919f20b3d0dd9486f Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.902229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" event={"ID":"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f","Type":"ContainerStarted","Data":"3cf685dfb54bd9f783078e2b7056dfec113ddd1592c977bb626c4621d127fd7f"} Nov 26 15:45:07 crc kubenswrapper[5010]: I1126 15:45:07.902270 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" event={"ID":"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd","Type":"ContainerStarted","Data":"97fa90a1fd292f902f3971c064afce524a12caeb19c7c01919f20b3d0dd9486f"} Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.764739 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-t6ftg"] Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.799121 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-wdnmw"] Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.800799 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.832697 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-wdnmw"] Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.845344 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.845765 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-config\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.845964 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhxhz\" (UniqueName: \"kubernetes.io/projected/bd9c2008-aaf0-461b-ae6a-496daa336018-kube-api-access-hhxhz\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.948111 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.948172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-config\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.948192 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhxhz\" (UniqueName: \"kubernetes.io/projected/bd9c2008-aaf0-461b-ae6a-496daa336018-kube-api-access-hhxhz\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.949661 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.950015 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-config\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:09 crc kubenswrapper[5010]: I1126 15:45:09.996324 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhxhz\" (UniqueName: 
\"kubernetes.io/projected/bd9c2008-aaf0-461b-ae6a-496daa336018-kube-api-access-hhxhz\") pod \"dnsmasq-dns-7c6d9948dc-wdnmw\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.156314 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.426437 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-vnj6v"] Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.474127 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-7vpwr"] Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.476502 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.506195 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-7vpwr"] Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.586391 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-dns-svc\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.586652 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgz5d\" (UniqueName: \"kubernetes.io/projected/b4aec875-840f-42a8-adcb-9d903e050409-kube-api-access-fgz5d\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.586816 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-config\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.692083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-dns-svc\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.692169 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgz5d\" (UniqueName: \"kubernetes.io/projected/b4aec875-840f-42a8-adcb-9d903e050409-kube-api-access-fgz5d\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.692223 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-config\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.693646 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-config\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.693927 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-dns-svc\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.713230 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgz5d\" (UniqueName: \"kubernetes.io/projected/b4aec875-840f-42a8-adcb-9d903e050409-kube-api-access-fgz5d\") pod \"dnsmasq-dns-6486446b9f-7vpwr\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.826487 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:45:10 crc kubenswrapper[5010]: I1126 15:45:10.972055 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-wdnmw"] Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.050005 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.051790 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055345 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055470 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-f72bm" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055366 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055747 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055786 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055964 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.055993 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.074914 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.104896 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9940cbe6-c323-4320-9e45-463e5c023156-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106077 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106226 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106490 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9940cbe6-c323-4320-9e45-463e5c023156-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106533 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvpxs\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-kube-api-access-cvpxs\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106599 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106669 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106746 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.106914 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.107030 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.107184 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213411 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213510 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213638 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213735 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9940cbe6-c323-4320-9e45-463e5c023156-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213794 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213873 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213950 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9940cbe6-c323-4320-9e45-463e5c023156-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.213979 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvpxs\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-kube-api-access-cvpxs\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.214016 
5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.214060 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.215152 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.215901 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.216565 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.216948 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.216952 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.217250 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.223668 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9940cbe6-c323-4320-9e45-463e5c023156-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.224091 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " 
pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.228391 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9940cbe6-c323-4320-9e45-463e5c023156-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.228817 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.250956 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.253648 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvpxs\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-kube-api-access-cvpxs\") pod \"rabbitmq-server-0\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.412919 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.424134 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.424231 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.424294 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.425241 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"59f84423fa85afba142264d8718184fcb64f0d905168b9c5b86ca7f3cd897062"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.425590 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://59f84423fa85afba142264d8718184fcb64f0d905168b9c5b86ca7f3cd897062" gracePeriod=600 Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.428599 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-6486446b9f-7vpwr"] Nov 26 15:45:11 crc kubenswrapper[5010]: W1126 15:45:11.437661 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4aec875_840f_42a8_adcb_9d903e050409.slice/crio-144c0275ed6a5608e50a8643e53a56d8a5c1af43979b2f79f5958fda769e6c39 WatchSource:0}: Error finding container 144c0275ed6a5608e50a8643e53a56d8a5c1af43979b2f79f5958fda769e6c39: Status 404 returned error can't find the container with id 144c0275ed6a5608e50a8643e53a56d8a5c1af43979b2f79f5958fda769e6c39 Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.589690 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.591459 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.597545 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.597760 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.598778 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.598979 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.598974 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.599114 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.605232 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-6kq8z" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.606846 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.725533 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726178 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726326 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726469 
5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726551 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726602 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4hmk\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-kube-api-access-w4hmk\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726662 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726691 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726746 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726770 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.726852 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.829823 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.832507 
5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4hmk\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-kube-api-access-w4hmk\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.832832 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.832903 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.832996 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.833051 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.833172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.833316 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.833404 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.833443 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.833531 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.835343 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.836059 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.836645 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.846589 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.847982 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.847982 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.850225 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.853351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.853832 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.858764 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.860335 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4hmk\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-kube-api-access-w4hmk\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.869410 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.939159 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.947295 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" event={"ID":"bd9c2008-aaf0-461b-ae6a-496daa336018","Type":"ContainerStarted","Data":"78245d0fdc070c9647dd5600e211cca374cf21530abf288e00dcae883996dae9"} Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.950753 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.957239 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="59f84423fa85afba142264d8718184fcb64f0d905168b9c5b86ca7f3cd897062" exitCode=0 Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.957339 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"59f84423fa85afba142264d8718184fcb64f0d905168b9c5b86ca7f3cd897062"} Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.957374 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"74af0b7ad1bdddc342c1daa4543b045a23faf8e3bd5f2a3ae5f6ba14cafd4e61"} Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.957393 5010 scope.go:117] "RemoveContainer" containerID="866a4d79b3a741e66d3af7f04184bb9e206692b2113aca2fc0a5c00bbc84fa10" Nov 26 15:45:11 crc kubenswrapper[5010]: I1126 15:45:11.959294 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" event={"ID":"b4aec875-840f-42a8-adcb-9d903e050409","Type":"ContainerStarted","Data":"144c0275ed6a5608e50a8643e53a56d8a5c1af43979b2f79f5958fda769e6c39"} Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.211070 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.212848 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.215801 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-f2q25" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.216645 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.217214 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.217758 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.230135 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.230181 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341390 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-default\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341497 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341564 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341596 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-kolla-config\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341626 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341652 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pljx7\" (UniqueName: \"kubernetes.io/projected/99fb2212-9383-48c9-b976-1e93a19c3ce1-kube-api-access-pljx7\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341676 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.341733 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.443356 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.443995 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pljx7\" (UniqueName: \"kubernetes.io/projected/99fb2212-9383-48c9-b976-1e93a19c3ce1-kube-api-access-pljx7\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.444030 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.444070 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.444095 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-default\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.444157 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.444213 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.444245 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-kolla-config\") pod 
\"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.455269 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-generated\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.456154 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.456998 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-kolla-config\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.458013 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-default\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.459299 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-operator-scripts\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.467976 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.468684 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.468741 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.483746 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pljx7\" (UniqueName: \"kubernetes.io/projected/99fb2212-9383-48c9-b976-1e93a19c3ce1-kube-api-access-pljx7\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.506351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " 
pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.537171 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.975933 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25","Type":"ContainerStarted","Data":"1bc145d0dfde452d66b5f3fca7cfb50b88e28dfb6407f0282c78f91505761933"} Nov 26 15:45:12 crc kubenswrapper[5010]: I1126 15:45:12.978562 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9940cbe6-c323-4320-9e45-463e5c023156","Type":"ContainerStarted","Data":"396c4191d4a6d39950260965f0734390f17733ce7451b70b55426d555c087132"} Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.185483 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.670182 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.674026 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.679747 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-zsfw2" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.680078 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.680317 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.683215 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.698320 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.802581 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dpp8m\" (UniqueName: \"kubernetes.io/projected/1afd71d7-914c-4e41-b04f-0325049fa972-kube-api-access-dpp8m\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803002 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803204 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803365 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803548 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803757 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803814 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.803966 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.910438 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.910797 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dpp8m\" (UniqueName: \"kubernetes.io/projected/1afd71d7-914c-4e41-b04f-0325049fa972-kube-api-access-dpp8m\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.910892 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911006 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911040 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911370 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911475 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911484 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911549 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.911947 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.912545 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.913197 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.913195 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.919284 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-combined-ca-bundle\") pod 
\"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.941901 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.944878 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dpp8m\" (UniqueName: \"kubernetes.io/projected/1afd71d7-914c-4e41-b04f-0325049fa972-kube-api-access-dpp8m\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:13 crc kubenswrapper[5010]: I1126 15:45:13.966892 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.000069 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"99fb2212-9383-48c9-b976-1e93a19c3ce1","Type":"ContainerStarted","Data":"e5fce497303a18e6cda621e977799dfe711f0b769848eddaade6f7e9e44dd246"} Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.018271 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.700773 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.734236 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.740114 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.745484 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.745756 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.745904 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-bpfv8" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.749613 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.831281 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-kolla-config\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.831334 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-config-data\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.831364 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7qws2\" (UniqueName: \"kubernetes.io/projected/6243a3e1-835d-4150-afea-1f2bb0032065-kube-api-access-7qws2\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.831387 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.831819 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.934434 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.934571 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-kolla-config\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.934594 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-config-data\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.934622 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7qws2\" (UniqueName: \"kubernetes.io/projected/6243a3e1-835d-4150-afea-1f2bb0032065-kube-api-access-7qws2\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.934642 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.936454 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-kolla-config\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.938629 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-config-data\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.945812 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.947855 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:14 crc kubenswrapper[5010]: I1126 15:45:14.958595 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7qws2\" (UniqueName: \"kubernetes.io/projected/6243a3e1-835d-4150-afea-1f2bb0032065-kube-api-access-7qws2\") pod \"memcached-0\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " pod="openstack/memcached-0" Nov 26 15:45:15 crc kubenswrapper[5010]: I1126 15:45:15.028934 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1afd71d7-914c-4e41-b04f-0325049fa972","Type":"ContainerStarted","Data":"bde65109f64657d34decd49aca5e3b7a212c01cc6bf169fe8c9cdcb366ada8ce"} Nov 26 15:45:15 crc kubenswrapper[5010]: I1126 15:45:15.099570 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 15:45:15 crc kubenswrapper[5010]: I1126 15:45:15.761297 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.003084 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.004323 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.008128 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-9b9c9" Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.022695 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.209651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9lz4\" (UniqueName: \"kubernetes.io/projected/65356e91-f417-4d3c-8298-cd16cd182fea-kube-api-access-s9lz4\") pod \"kube-state-metrics-0\" (UID: \"65356e91-f417-4d3c-8298-cd16cd182fea\") " pod="openstack/kube-state-metrics-0" Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.313656 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9lz4\" (UniqueName: \"kubernetes.io/projected/65356e91-f417-4d3c-8298-cd16cd182fea-kube-api-access-s9lz4\") pod \"kube-state-metrics-0\" (UID: \"65356e91-f417-4d3c-8298-cd16cd182fea\") " pod="openstack/kube-state-metrics-0" Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.337027 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9lz4\" (UniqueName: \"kubernetes.io/projected/65356e91-f417-4d3c-8298-cd16cd182fea-kube-api-access-s9lz4\") pod \"kube-state-metrics-0\" (UID: \"65356e91-f417-4d3c-8298-cd16cd182fea\") " pod="openstack/kube-state-metrics-0" Nov 26 15:45:16 crc kubenswrapper[5010]: I1126 15:45:16.630589 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.005200 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-nbrh7"] Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.007130 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.017150 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.017444 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-8zpnm" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.017542 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.017605 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-f7n92"] Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022399 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022512 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-ovn-controller-tls-certs\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022537 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82n4f\" (UniqueName: \"kubernetes.io/projected/3261dde1-64a6-4fe7-851e-4a5754444fd0-kube-api-access-82n4f\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022559 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run-ovn\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022585 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022600 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-combined-ca-bundle\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.022903 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.023091 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-log-ovn\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.042488 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-f7n92"] Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.075314 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-nbrh7"] Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126004 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-run\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126082 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-log-ovn\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126128 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94cm8\" (UniqueName: \"kubernetes.io/projected/d1c2d398-f284-40d9-beb4-cd3121568f5a-kube-api-access-94cm8\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126161 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-lib\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126224 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d1c2d398-f284-40d9-beb4-cd3121568f5a-scripts\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126250 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-log\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126357 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-etc-ovs\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126408 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126456 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-ovn-controller-tls-certs\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126480 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82n4f\" (UniqueName: \"kubernetes.io/projected/3261dde1-64a6-4fe7-851e-4a5754444fd0-kube-api-access-82n4f\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126513 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run-ovn\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126546 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.126586 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-combined-ca-bundle\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.128098 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.128245 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-log-ovn\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.128367 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run-ovn\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.134683 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-combined-ca-bundle\") pod \"ovn-controller-nbrh7\" (UID: 
\"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.135434 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-ovn-controller-tls-certs\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.136347 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.152569 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82n4f\" (UniqueName: \"kubernetes.io/projected/3261dde1-64a6-4fe7-851e-4a5754444fd0-kube-api-access-82n4f\") pod \"ovn-controller-nbrh7\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228092 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-run\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228195 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94cm8\" (UniqueName: \"kubernetes.io/projected/d1c2d398-f284-40d9-beb4-cd3121568f5a-kube-api-access-94cm8\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228237 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-lib\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228269 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d1c2d398-f284-40d9-beb4-cd3121568f5a-scripts\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228299 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-log\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228332 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-etc-ovs\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.228676 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-etc-ovs\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.229076 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-lib\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.229144 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-log\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.229772 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-run\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.231211 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d1c2d398-f284-40d9-beb4-cd3121568f5a-scripts\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.248392 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94cm8\" (UniqueName: \"kubernetes.io/projected/d1c2d398-f284-40d9-beb4-cd3121568f5a-kube-api-access-94cm8\") pod \"ovn-controller-ovs-f7n92\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.356233 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.373651 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.846286 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.849046 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.854517 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.854851 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.855070 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.855330 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.855491 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-zl9pn" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.902411 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.942756 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-config\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.942813 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.942885 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.942903 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6txx\" (UniqueName: \"kubernetes.io/projected/5780f988-6f45-4fdb-9a2b-f149c0499552-kube-api-access-w6txx\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.943061 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.943157 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.943285 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"scripts\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:20 crc kubenswrapper[5010]: I1126 15:45:20.943365 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044018 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6txx\" (UniqueName: \"kubernetes.io/projected/5780f988-6f45-4fdb-9a2b-f149c0499552-kube-api-access-w6txx\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044095 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044177 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044243 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044267 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044301 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-config\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.044333 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 
15:45:21.046417 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-config\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.046452 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.046535 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.048086 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.050721 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.051211 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.052754 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.064208 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6txx\" (UniqueName: \"kubernetes.io/projected/5780f988-6f45-4fdb-9a2b-f149c0499552-kube-api-access-w6txx\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.085406 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"ovsdbserver-nb-0\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:21 crc kubenswrapper[5010]: I1126 15:45:21.221396 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.468871 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.470941 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.473697 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-6jzlk" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.474141 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.474822 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.474829 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.502275 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.594364 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.594419 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.594452 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-config\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.594784 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.594873 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.595024 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pz2c\" (UniqueName: \"kubernetes.io/projected/776a1766-4e7d-4ea0-bd5b-18b6b352448a-kube-api-access-8pz2c\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 
crc kubenswrapper[5010]: I1126 15:45:23.595242 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.595358 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697380 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697467 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697514 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-config\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697672 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697780 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pz2c\" (UniqueName: \"kubernetes.io/projected/776a1766-4e7d-4ea0-bd5b-18b6b352448a-kube-api-access-8pz2c\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.697904 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.698507 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: 
\"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.698691 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.698866 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.698811 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.700050 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-config\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.707686 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.707811 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.708900 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.727338 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8pz2c\" (UniqueName: \"kubernetes.io/projected/776a1766-4e7d-4ea0-bd5b-18b6b352448a-kube-api-access-8pz2c\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.728015 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-sb-0\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:23 crc kubenswrapper[5010]: I1126 15:45:23.794915 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 15:45:58 crc kubenswrapper[5010]: W1126 15:45:58.119041 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6243a3e1_835d_4150_afea_1f2bb0032065.slice/crio-62b8d5ed244e315b9c8dbb554f638dc2740e655df728683a208d538650ccbdde WatchSource:0}: Error finding container 62b8d5ed244e315b9c8dbb554f638dc2740e655df728683a208d538650ccbdde: Status 404 returned error can't find the container with id 62b8d5ed244e315b9c8dbb554f638dc2740e655df728683a208d538650ccbdde Nov 26 15:45:58 crc kubenswrapper[5010]: I1126 15:45:58.569856 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6243a3e1-835d-4150-afea-1f2bb0032065","Type":"ContainerStarted","Data":"62b8d5ed244e315b9c8dbb554f638dc2740e655df728683a208d538650ccbdde"} Nov 26 15:46:02 crc kubenswrapper[5010]: E1126 15:46:02.531239 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce" Nov 26 15:46:02 crc kubenswrapper[5010]: E1126 15:46:02.531944 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pljx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(99fb2212-9383-48c9-b976-1e93a19c3ce1): ErrImagePull: rpc error: 
code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:02 crc kubenswrapper[5010]: E1126 15:46:02.534354 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" Nov 26 15:46:02 crc kubenswrapper[5010]: E1126 15:46:02.603616 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d9f084a0217dcb099c51ce\\\"\"" pod="openstack/openstack-galera-0" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" Nov 26 15:46:11 crc kubenswrapper[5010]: E1126 15:46:11.174782 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 15:46:11 crc kubenswrapper[5010]: E1126 15:46:11.176030 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-klsxp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
dnsmasq-dns-6584b49599-t6ftg_openstack(8b6e4803-3d06-4677-a0c5-c45c4bf70ffd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:11 crc kubenswrapper[5010]: E1126 15:46:11.177548 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" podUID="8b6e4803-3d06-4677-a0c5-c45c4bf70ffd" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.539508 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.700438 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" event={"ID":"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd","Type":"ContainerDied","Data":"97fa90a1fd292f902f3971c064afce524a12caeb19c7c01919f20b3d0dd9486f"} Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.700513 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-t6ftg" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.706201 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-config\") pod \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.706240 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-dns-svc\") pod \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.706323 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-klsxp\" (UniqueName: \"kubernetes.io/projected/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-kube-api-access-klsxp\") pod \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\" (UID: \"8b6e4803-3d06-4677-a0c5-c45c4bf70ffd\") " Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.707354 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-config" (OuterVolumeSpecName: "config") pod "8b6e4803-3d06-4677-a0c5-c45c4bf70ffd" (UID: "8b6e4803-3d06-4677-a0c5-c45c4bf70ffd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.707535 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8b6e4803-3d06-4677-a0c5-c45c4bf70ffd" (UID: "8b6e4803-3d06-4677-a0c5-c45c4bf70ffd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.712875 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-kube-api-access-klsxp" (OuterVolumeSpecName: "kube-api-access-klsxp") pod "8b6e4803-3d06-4677-a0c5-c45c4bf70ffd" (UID: "8b6e4803-3d06-4677-a0c5-c45c4bf70ffd"). InnerVolumeSpecName "kube-api-access-klsxp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.808912 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.808963 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.808979 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-klsxp\" (UniqueName: \"kubernetes.io/projected/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd-kube-api-access-klsxp\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.860296 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-nbrh7"] Nov 26 15:46:12 crc kubenswrapper[5010]: I1126 15:46:12.960326 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.070959 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-t6ftg"] Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.078763 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-t6ftg"] Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.125134 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.125385 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hhxhz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7c6d9948dc-wdnmw_openstack(bd9c2008-aaf0-461b-ae6a-496daa336018): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.126565 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" podUID="bd9c2008-aaf0-461b-ae6a-496daa336018" Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.219412 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.276850 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.277133 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 
30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cvpxs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(9940cbe6-c323-4320-9e45-463e5c023156): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.278372 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="9940cbe6-c323-4320-9e45-463e5c023156" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.571266 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.571951 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> 
/var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w4hmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.573923 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.716107 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"776a1766-4e7d-4ea0-bd5b-18b6b352448a","Type":"ContainerStarted","Data":"a1a9c340e7269e2e729f030ae3f170d597b82dca49c03d6efa6539817b46b698"} Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.718679 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7" event={"ID":"3261dde1-64a6-4fe7-851e-4a5754444fd0","Type":"ContainerStarted","Data":"2c056ceb03fc4922675379e167bfde32a50fa9cbfe7831a79c5efae5e491ab00"} Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.720390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"65356e91-f417-4d3c-8298-cd16cd182fea","Type":"ContainerStarted","Data":"50eddd0272617a2bdb446683f931db808bc6755f4e13c7add8fde594afd09c44"} Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.722386 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.722505 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b\\\"\"" pod="openstack/rabbitmq-server-0" podUID="9940cbe6-c323-4320-9e45-463e5c023156" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.723156 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" podUID="bd9c2008-aaf0-461b-ae6a-496daa336018" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.725001 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.725216 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v25g8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-7bdd77c89-vnj6v_openstack(a39d3aa0-c7da-43b3-96e0-cd65c918ce0f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.726417 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" podUID="a39d3aa0-c7da-43b3-96e0-cd65c918ce0f" Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.740044 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-f7n92"] Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.890633 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.891735 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.891981 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fgz5d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-6486446b9f-7vpwr_openstack(b4aec875-840f-42a8-adcb-9d903e050409): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:13 crc kubenswrapper[5010]: E1126 15:46:13.893588 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" podUID="b4aec875-840f-42a8-adcb-9d903e050409" Nov 26 15:46:13 crc kubenswrapper[5010]: I1126 15:46:13.905269 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b6e4803-3d06-4677-a0c5-c45c4bf70ffd" path="/var/lib/kubelet/pods/8b6e4803-3d06-4677-a0c5-c45c4bf70ffd/volumes" Nov 26 15:46:14 crc kubenswrapper[5010]: W1126 15:46:14.553854 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5780f988_6f45_4fdb_9a2b_f149c0499552.slice/crio-26d6f1cf84cd8abc09967ed7f818439e51559f49fe4de7feea9c5f0dc231260a WatchSource:0}: Error finding container 26d6f1cf84cd8abc09967ed7f818439e51559f49fe4de7feea9c5f0dc231260a: Status 404 returned error can't find the container with id 26d6f1cf84cd8abc09967ed7f818439e51559f49fe4de7feea9c5f0dc231260a Nov 26 15:46:14 crc kubenswrapper[5010]: W1126 15:46:14.566843 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1c2d398_f284_40d9_beb4_cd3121568f5a.slice/crio-22f32d683476f41ee20dbaa98c03ee0e656f0251623e4f6aa417942bbbef2634 WatchSource:0}: Error finding container 22f32d683476f41ee20dbaa98c03ee0e656f0251623e4f6aa417942bbbef2634: Status 404 returned error can't find the container with id 
22f32d683476f41ee20dbaa98c03ee0e656f0251623e4f6aa417942bbbef2634 Nov 26 15:46:14 crc kubenswrapper[5010]: I1126 15:46:14.731608 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerStarted","Data":"22f32d683476f41ee20dbaa98c03ee0e656f0251623e4f6aa417942bbbef2634"} Nov 26 15:46:14 crc kubenswrapper[5010]: I1126 15:46:14.734455 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5780f988-6f45-4fdb-9a2b-f149c0499552","Type":"ContainerStarted","Data":"26d6f1cf84cd8abc09967ed7f818439e51559f49fe4de7feea9c5f0dc231260a"} Nov 26 15:46:14 crc kubenswrapper[5010]: E1126 15:46:14.737870 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba\\\"\"" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" podUID="b4aec875-840f-42a8-adcb-9d903e050409" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.128851 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.254428 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v25g8\" (UniqueName: \"kubernetes.io/projected/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-kube-api-access-v25g8\") pod \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.254671 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-config\") pod \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\" (UID: \"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f\") " Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.255897 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-config" (OuterVolumeSpecName: "config") pod "a39d3aa0-c7da-43b3-96e0-cd65c918ce0f" (UID: "a39d3aa0-c7da-43b3-96e0-cd65c918ce0f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.263614 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-kube-api-access-v25g8" (OuterVolumeSpecName: "kube-api-access-v25g8") pod "a39d3aa0-c7da-43b3-96e0-cd65c918ce0f" (UID: "a39d3aa0-c7da-43b3-96e0-cd65c918ce0f"). InnerVolumeSpecName "kube-api-access-v25g8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.358037 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.358107 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v25g8\" (UniqueName: \"kubernetes.io/projected/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f-kube-api-access-v25g8\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.750361 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1afd71d7-914c-4e41-b04f-0325049fa972","Type":"ContainerStarted","Data":"55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396"} Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.752352 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" event={"ID":"a39d3aa0-c7da-43b3-96e0-cd65c918ce0f","Type":"ContainerDied","Data":"3cf685dfb54bd9f783078e2b7056dfec113ddd1592c977bb626c4621d127fd7f"} Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.752440 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-vnj6v" Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.838748 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-vnj6v"] Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.847529 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-vnj6v"] Nov 26 15:46:15 crc kubenswrapper[5010]: I1126 15:46:15.907315 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a39d3aa0-c7da-43b3-96e0-cd65c918ce0f" path="/var/lib/kubelet/pods/a39d3aa0-c7da-43b3-96e0-cd65c918ce0f/volumes" Nov 26 15:46:19 crc kubenswrapper[5010]: I1126 15:46:19.791262 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"99fb2212-9383-48c9-b976-1e93a19c3ce1","Type":"ContainerStarted","Data":"a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58"} Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.411224 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-vl2vn"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.414328 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.418170 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.421984 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-vl2vn"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.538058 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-combined-ca-bundle\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.538125 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6a5d15-b08c-481b-84af-88e05824b26a-config\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.538153 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovn-rundir\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.538181 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.538205 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovs-rundir\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.538284 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hglv6\" (UniqueName: \"kubernetes.io/projected/9d6a5d15-b08c-481b-84af-88e05824b26a-kube-api-access-hglv6\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.579493 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-wdnmw"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.638960 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-npwt2"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.639927 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6a5d15-b08c-481b-84af-88e05824b26a-config\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " 
pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.639990 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovn-rundir\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.640025 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovs-rundir\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.640041 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.640072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hglv6\" (UniqueName: \"kubernetes.io/projected/9d6a5d15-b08c-481b-84af-88e05824b26a-kube-api-access-hglv6\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.640172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-combined-ca-bundle\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.640579 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.641598 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovs-rundir\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.642836 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6a5d15-b08c-481b-84af-88e05824b26a-config\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.643130 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.643396 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovn-rundir\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.647631 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-npwt2"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.648304 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-combined-ca-bundle\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.650489 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.673624 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hglv6\" (UniqueName: \"kubernetes.io/projected/9d6a5d15-b08c-481b-84af-88e05824b26a-kube-api-access-hglv6\") pod \"ovn-controller-metrics-vl2vn\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.742271 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-dns-svc\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.742458 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d92k\" (UniqueName: \"kubernetes.io/projected/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-kube-api-access-7d92k\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 
15:46:24.742554 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-config\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.742619 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.753089 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.845676 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d92k\" (UniqueName: \"kubernetes.io/projected/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-kube-api-access-7d92k\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.845776 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-config\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.845817 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.845845 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-dns-svc\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.847058 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-dns-svc\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.847108 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-config\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.847252 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-ovsdbserver-nb\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " 
pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.869553 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d92k\" (UniqueName: \"kubernetes.io/projected/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-kube-api-access-7d92k\") pod \"dnsmasq-dns-65c78595c5-npwt2\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.874831 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-7vpwr"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.913872 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-6pxjv"] Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.915481 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.917619 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 15:46:24 crc kubenswrapper[5010]: I1126 15:46:24.930112 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-6pxjv"] Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.012574 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.048837 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.048895 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.048963 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk9k2\" (UniqueName: \"kubernetes.io/projected/83c93744-06d7-4cf1-8770-a93ca140fa31-kube-api-access-nk9k2\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.049016 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.049143 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-config\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.150617 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk9k2\" (UniqueName: \"kubernetes.io/projected/83c93744-06d7-4cf1-8770-a93ca140fa31-kube-api-access-nk9k2\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.150693 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.150798 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-config\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.150833 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.150852 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.151783 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.152363 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.153354 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.156263 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-config\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.175955 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk9k2\" (UniqueName: 
\"kubernetes.io/projected/83c93744-06d7-4cf1-8770-a93ca140fa31-kube-api-access-nk9k2\") pod \"dnsmasq-dns-5c7b6b5695-6pxjv\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:25 crc kubenswrapper[5010]: I1126 15:46:25.248327 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.581083 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.749821 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-dns-svc\") pod \"bd9c2008-aaf0-461b-ae6a-496daa336018\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.750494 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bd9c2008-aaf0-461b-ae6a-496daa336018" (UID: "bd9c2008-aaf0-461b-ae6a-496daa336018"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.750505 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhxhz\" (UniqueName: \"kubernetes.io/projected/bd9c2008-aaf0-461b-ae6a-496daa336018-kube-api-access-hhxhz\") pod \"bd9c2008-aaf0-461b-ae6a-496daa336018\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.750612 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-config\") pod \"bd9c2008-aaf0-461b-ae6a-496daa336018\" (UID: \"bd9c2008-aaf0-461b-ae6a-496daa336018\") " Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.751795 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-config" (OuterVolumeSpecName: "config") pod "bd9c2008-aaf0-461b-ae6a-496daa336018" (UID: "bd9c2008-aaf0-461b-ae6a-496daa336018"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.753355 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.753375 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bd9c2008-aaf0-461b-ae6a-496daa336018-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.765522 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9c2008-aaf0-461b-ae6a-496daa336018-kube-api-access-hhxhz" (OuterVolumeSpecName: "kube-api-access-hhxhz") pod "bd9c2008-aaf0-461b-ae6a-496daa336018" (UID: "bd9c2008-aaf0-461b-ae6a-496daa336018"). InnerVolumeSpecName "kube-api-access-hhxhz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.855524 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhxhz\" (UniqueName: \"kubernetes.io/projected/bd9c2008-aaf0-461b-ae6a-496daa336018-kube-api-access-hhxhz\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.884479 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" event={"ID":"bd9c2008-aaf0-461b-ae6a-496daa336018","Type":"ContainerDied","Data":"78245d0fdc070c9647dd5600e211cca374cf21530abf288e00dcae883996dae9"} Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.884564 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-wdnmw" Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.973332 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-wdnmw"] Nov 26 15:46:29 crc kubenswrapper[5010]: I1126 15:46:29.984827 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-wdnmw"] Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.882531 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.895154 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" event={"ID":"b4aec875-840f-42a8-adcb-9d903e050409","Type":"ContainerDied","Data":"144c0275ed6a5608e50a8643e53a56d8a5c1af43979b2f79f5958fda769e6c39"} Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.895264 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-7vpwr" Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.976623 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgz5d\" (UniqueName: \"kubernetes.io/projected/b4aec875-840f-42a8-adcb-9d903e050409-kube-api-access-fgz5d\") pod \"b4aec875-840f-42a8-adcb-9d903e050409\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.976808 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-config\") pod \"b4aec875-840f-42a8-adcb-9d903e050409\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.976909 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-dns-svc\") pod \"b4aec875-840f-42a8-adcb-9d903e050409\" (UID: \"b4aec875-840f-42a8-adcb-9d903e050409\") " Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.977754 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b4aec875-840f-42a8-adcb-9d903e050409" (UID: "b4aec875-840f-42a8-adcb-9d903e050409"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.977852 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-config" (OuterVolumeSpecName: "config") pod "b4aec875-840f-42a8-adcb-9d903e050409" (UID: "b4aec875-840f-42a8-adcb-9d903e050409"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:30 crc kubenswrapper[5010]: I1126 15:46:30.983551 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4aec875-840f-42a8-adcb-9d903e050409-kube-api-access-fgz5d" (OuterVolumeSpecName: "kube-api-access-fgz5d") pod "b4aec875-840f-42a8-adcb-9d903e050409" (UID: "b4aec875-840f-42a8-adcb-9d903e050409"). InnerVolumeSpecName "kube-api-access-fgz5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.078534 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.078571 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgz5d\" (UniqueName: \"kubernetes.io/projected/b4aec875-840f-42a8-adcb-9d903e050409-kube-api-access-fgz5d\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.078583 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4aec875-840f-42a8-adcb-9d903e050409-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.244751 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-7vpwr"] Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.254741 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-7vpwr"] Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.904105 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4aec875-840f-42a8-adcb-9d903e050409" path="/var/lib/kubelet/pods/b4aec875-840f-42a8-adcb-9d903e050409/volumes" Nov 26 15:46:31 crc kubenswrapper[5010]: I1126 15:46:31.904484 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9c2008-aaf0-461b-ae6a-496daa336018" path="/var/lib/kubelet/pods/bd9c2008-aaf0-461b-ae6a-496daa336018/volumes" Nov 26 15:46:32 crc kubenswrapper[5010]: E1126 15:46:32.569252 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server@sha256:fc9c99eeef91523482bd8f92661b393287e1f2a24ad2ba9e33191f8de9af74cf" Nov 26 15:46:32 crc kubenswrapper[5010]: E1126 15:46:32.569681 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:ovsdbserver-nb,Image:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server@sha256:fc9c99eeef91523482bd8f92661b393287e1f2a24ad2ba9e33191f8de9af74cf,Command:[/usr/bin/dumb-init],Args:[/usr/local/bin/container-scripts/setup.sh],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n699h55ch87h66h88h96h669h597h98h547h575hc8h5d7h9hc9h648hc9h55bhcch687h675h559h98h659h568h674h556h65fh65fh5fh5dchd4q,ValueFrom:nil,},EnvVar{Name:OVN_LOGDIR,Value:/tmp,ValueFrom:nil,},EnvVar{Name:OVN_RUNDIR,Value:/tmp,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovndbcluster-nb-etc-ovn,ReadOnly:false,MountPath:/etc/ovn,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndb.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovndb.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-w6txx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:&Lifecycle{PostStart:nil,PreStop:&LifecycleHandler{Exec:&ExecAction{Command:[/usr/local/bin/container-scripts/cleanup.sh],},HTTPGet:nil,TCPSocket:nil,Sleep:nil,},},TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/pidof ovsdb-server],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:20,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} 
start failed in pod ovsdbserver-nb-0_openstack(5780f988-6f45-4fdb-9a2b-f149c0499552): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.362323 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-6pxjv"] Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.469637 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-vl2vn"] Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.477626 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-npwt2"] Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.930535 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerStarted","Data":"684a7192db7883e6dfeb8517a5b35048195da02842af78b06b5df20e3d3d7f64"} Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.932271 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6243a3e1-835d-4150-afea-1f2bb0032065","Type":"ContainerStarted","Data":"592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f"} Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.932491 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 26 15:46:33 crc kubenswrapper[5010]: I1126 15:46:33.974126 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=47.313233613 podStartE2EDuration="1m19.974104382s" podCreationTimestamp="2025-11-26 15:45:14 +0000 UTC" firstStartedPulling="2025-11-26 15:45:58.135394162 +0000 UTC m=+1178.926111350" lastFinishedPulling="2025-11-26 15:46:30.796264961 +0000 UTC m=+1211.586982119" observedRunningTime="2025-11-26 15:46:33.972068801 +0000 UTC m=+1214.762785959" watchObservedRunningTime="2025-11-26 15:46:33.974104382 +0000 UTC m=+1214.764821540" Nov 26 15:46:34 crc kubenswrapper[5010]: W1126 15:46:34.429183 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d6a5d15_b08c_481b_84af_88e05824b26a.slice/crio-99e88f857ab4b78968b57ef15d5105a2ce09eb772a71e5a2943666e4d92d38e0 WatchSource:0}: Error finding container 99e88f857ab4b78968b57ef15d5105a2ce09eb772a71e5a2943666e4d92d38e0: Status 404 returned error can't find the container with id 99e88f857ab4b78968b57ef15d5105a2ce09eb772a71e5a2943666e4d92d38e0 Nov 26 15:46:34 crc kubenswrapper[5010]: W1126 15:46:34.438495 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83c93744_06d7_4cf1_8770_a93ca140fa31.slice/crio-c5100b45042269ae5e66f18787e74f07143e2234165275cddaf650913b0bd5f1 WatchSource:0}: Error finding container c5100b45042269ae5e66f18787e74f07143e2234165275cddaf650913b0bd5f1: Status 404 returned error can't find the container with id c5100b45042269ae5e66f18787e74f07143e2234165275cddaf650913b0bd5f1 Nov 26 15:46:34 crc kubenswrapper[5010]: I1126 15:46:34.950187 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" event={"ID":"83c93744-06d7-4cf1-8770-a93ca140fa31","Type":"ContainerStarted","Data":"c5100b45042269ae5e66f18787e74f07143e2234165275cddaf650913b0bd5f1"} Nov 26 15:46:34 crc kubenswrapper[5010]: I1126 15:46:34.953054 5010 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-vl2vn" event={"ID":"9d6a5d15-b08c-481b-84af-88e05824b26a","Type":"ContainerStarted","Data":"99e88f857ab4b78968b57ef15d5105a2ce09eb772a71e5a2943666e4d92d38e0"} Nov 26 15:46:34 crc kubenswrapper[5010]: I1126 15:46:34.956436 5010 generic.go:334] "Generic (PLEG): container finished" podID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerID="684a7192db7883e6dfeb8517a5b35048195da02842af78b06b5df20e3d3d7f64" exitCode=0 Nov 26 15:46:34 crc kubenswrapper[5010]: I1126 15:46:34.956545 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerDied","Data":"684a7192db7883e6dfeb8517a5b35048195da02842af78b06b5df20e3d3d7f64"} Nov 26 15:46:34 crc kubenswrapper[5010]: I1126 15:46:34.958370 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" event={"ID":"cc975d2c-bffd-4de4-9431-43fbdd64bfd5","Type":"ContainerStarted","Data":"796457aceba4ce615fb7ac83c416dedeed773e5fc38ccea748b5ad97e3a7f185"} Nov 26 15:46:36 crc kubenswrapper[5010]: I1126 15:46:36.979856 5010 generic.go:334] "Generic (PLEG): container finished" podID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerID="a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58" exitCode=0 Nov 26 15:46:36 crc kubenswrapper[5010]: I1126 15:46:36.980224 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"99fb2212-9383-48c9-b976-1e93a19c3ce1","Type":"ContainerDied","Data":"a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58"} Nov 26 15:46:36 crc kubenswrapper[5010]: I1126 15:46:36.983212 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25","Type":"ContainerStarted","Data":"ab6cade5267022ce5c3a9112e0b1e51b93929e7dcbe177fc49bab18f72aaf1a2"} Nov 26 15:46:39 crc kubenswrapper[5010]: I1126 15:46:39.003626 5010 generic.go:334] "Generic (PLEG): container finished" podID="1afd71d7-914c-4e41-b04f-0325049fa972" containerID="55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396" exitCode=0 Nov 26 15:46:39 crc kubenswrapper[5010]: I1126 15:46:39.003767 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1afd71d7-914c-4e41-b04f-0325049fa972","Type":"ContainerDied","Data":"55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396"} Nov 26 15:46:39 crc kubenswrapper[5010]: I1126 15:46:39.007022 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9940cbe6-c323-4320-9e45-463e5c023156","Type":"ContainerStarted","Data":"fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd"} Nov 26 15:46:40 crc kubenswrapper[5010]: I1126 15:46:40.101635 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 26 15:46:41 crc kubenswrapper[5010]: I1126 15:46:41.035098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7" event={"ID":"3261dde1-64a6-4fe7-851e-4a5754444fd0","Type":"ContainerStarted","Data":"e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02"} Nov 26 15:46:42 crc kubenswrapper[5010]: I1126 15:46:42.042928 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-nbrh7" Nov 26 15:46:42 crc kubenswrapper[5010]: I1126 15:46:42.062805 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-nbrh7" podStartSLOduration=63.525299937 podStartE2EDuration="1m23.062700362s" podCreationTimestamp="2025-11-26 15:45:19 +0000 UTC" firstStartedPulling="2025-11-26 15:46:13.367650513 +0000 UTC m=+1194.158367661" lastFinishedPulling="2025-11-26 15:46:32.905050938 +0000 UTC m=+1213.695768086" observedRunningTime="2025-11-26 15:46:42.062357793 +0000 UTC m=+1222.853074971" watchObservedRunningTime="2025-11-26 15:46:42.062700362 +0000 UTC m=+1222.853417510" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.413631 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-6pxjv"] Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.454018 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2mg29"] Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.455500 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.478506 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2mg29"] Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.580692 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.580778 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.580801 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-config\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.580878 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46tlv\" (UniqueName: \"kubernetes.io/projected/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-kube-api-access-46tlv\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.580908 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.682143 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46tlv\" (UniqueName: \"kubernetes.io/projected/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-kube-api-access-46tlv\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: 
\"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.682239 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.682319 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.682358 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.682385 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-config\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.683737 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-config\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.684788 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.685370 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.686017 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.708087 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46tlv\" (UniqueName: \"kubernetes.io/projected/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-kube-api-access-46tlv\") pod \"dnsmasq-dns-cf8bcbfcf-2mg29\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:46 crc kubenswrapper[5010]: I1126 15:46:46.775297 5010 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.610845 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.619830 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.621941 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.622540 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.622750 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-mjfmn" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.623773 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.636829 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.803175 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.803328 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gmddg\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-kube-api-access-gmddg\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.803360 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.803540 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-lock\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.803567 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-cache\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.904736 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-cache\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.904782 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.904825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gmddg\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-kube-api-access-gmddg\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.904855 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.904937 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-lock\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: E1126 15:46:47.905120 5010 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 15:46:47 crc kubenswrapper[5010]: E1126 15:46:47.905172 5010 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 15:46:47 crc kubenswrapper[5010]: E1126 15:46:47.905266 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift podName:1803fc99-2cc8-44e7-8ce5-eac5bc548f88 nodeName:}" failed. No retries permitted until 2025-11-26 15:46:48.405231502 +0000 UTC m=+1229.195948690 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift") pod "swift-storage-0" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88") : configmap "swift-ring-files" not found Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.905800 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.906090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-lock\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.906109 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-cache\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.936701 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:47 crc kubenswrapper[5010]: I1126 15:46:47.948250 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gmddg\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-kube-api-access-gmddg\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.200624 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-hdcdl"] Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.208164 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.212215 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.212292 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.212478 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.220688 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-hdcdl"] Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.319608 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-ring-data-devices\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.319728 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnhb4\" (UniqueName: \"kubernetes.io/projected/260f1345-096d-4d94-901e-943c3d9e4135-kube-api-access-nnhb4\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.319787 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-scripts\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.319826 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-combined-ca-bundle\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.319938 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/260f1345-096d-4d94-901e-943c3d9e4135-etc-swift\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.320051 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-swiftconf\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.320136 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-dispersionconf\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 
15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421553 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-combined-ca-bundle\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421614 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/260f1345-096d-4d94-901e-943c3d9e4135-etc-swift\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421647 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-swiftconf\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421682 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-dispersionconf\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421759 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-ring-data-devices\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnhb4\" (UniqueName: \"kubernetes.io/projected/260f1345-096d-4d94-901e-943c3d9e4135-kube-api-access-nnhb4\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421834 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.421856 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-scripts\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: E1126 15:46:48.422041 5010 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 15:46:48 crc kubenswrapper[5010]: E1126 15:46:48.422081 5010 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 15:46:48 crc kubenswrapper[5010]: E1126 15:46:48.422150 5010 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift podName:1803fc99-2cc8-44e7-8ce5-eac5bc548f88 nodeName:}" failed. No retries permitted until 2025-11-26 15:46:49.422122339 +0000 UTC m=+1230.212839487 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift") pod "swift-storage-0" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88") : configmap "swift-ring-files" not found Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.423026 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-ring-data-devices\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.423065 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-scripts\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.423390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/260f1345-096d-4d94-901e-943c3d9e4135-etc-swift\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.428630 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-combined-ca-bundle\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.429504 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-dispersionconf\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.443270 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-swiftconf\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.443835 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnhb4\" (UniqueName: \"kubernetes.io/projected/260f1345-096d-4d94-901e-943c3d9e4135-kube-api-access-nnhb4\") pod \"swift-ring-rebalance-hdcdl\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:48 crc kubenswrapper[5010]: I1126 15:46:48.531876 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:46:49 crc kubenswrapper[5010]: I1126 15:46:49.440108 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:49 crc kubenswrapper[5010]: E1126 15:46:49.440440 5010 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 15:46:49 crc kubenswrapper[5010]: E1126 15:46:49.440485 5010 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 15:46:49 crc kubenswrapper[5010]: E1126 15:46:49.440600 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift podName:1803fc99-2cc8-44e7-8ce5-eac5bc548f88 nodeName:}" failed. No retries permitted until 2025-11-26 15:46:51.440565058 +0000 UTC m=+1232.231282246 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift") pod "swift-storage-0" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88") : configmap "swift-ring-files" not found Nov 26 15:46:51 crc kubenswrapper[5010]: I1126 15:46:51.476024 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.476198 5010 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.477057 5010 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.477135 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift podName:1803fc99-2cc8-44e7-8ce5-eac5bc548f88 nodeName:}" failed. No retries permitted until 2025-11-26 15:46:55.477114671 +0000 UTC m=+1236.267831819 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift") pod "swift-storage-0" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88") : configmap "swift-ring-files" not found Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.690704 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb" Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.690815 5010 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb" Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.690992 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-state-metrics,Image:registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb,Command:[],Args:[--resources=pods --namespaces=openstack],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http-metrics,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},ContainerPort{Name:telemetry,HostPort:0,ContainerPort:8081,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-s9lz4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/livez,Port:{0 8080 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod kube-state-metrics-0_openstack(65356e91-f417-4d3c-8298-cd16cd182fea): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 15:46:51 crc kubenswrapper[5010]: E1126 15:46:51.692258 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with 
ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openstack/kube-state-metrics-0" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" Nov 26 15:46:52 crc kubenswrapper[5010]: E1126 15:46:52.215622 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-state-metrics\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb\\\"\"" pod="openstack/kube-state-metrics-0" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" Nov 26 15:46:52 crc kubenswrapper[5010]: E1126 15:46:52.465891 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovsdbserver-nb\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" Nov 26 15:46:52 crc kubenswrapper[5010]: I1126 15:46:52.475567 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2mg29"] Nov 26 15:46:52 crc kubenswrapper[5010]: I1126 15:46:52.552195 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-hdcdl"] Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.158742 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5780f988-6f45-4fdb-9a2b-f149c0499552","Type":"ContainerStarted","Data":"969966e67ae90d742a77f84466bf294b5b02f4399d3b508d206d36643320950e"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.167671 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1afd71d7-914c-4e41-b04f-0325049fa972","Type":"ContainerStarted","Data":"fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.171533 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-vl2vn" event={"ID":"9d6a5d15-b08c-481b-84af-88e05824b26a","Type":"ContainerStarted","Data":"d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.173579 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"99fb2212-9383-48c9-b976-1e93a19c3ce1","Type":"ContainerStarted","Data":"8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.174921 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hdcdl" event={"ID":"260f1345-096d-4d94-901e-943c3d9e4135","Type":"ContainerStarted","Data":"65707cacee357a5d112c160838e3b4aafc3a7b6f8b73c476b42bbfea7ad41e6c"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.176945 5010 generic.go:334] "Generic (PLEG): container finished" podID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerID="d999b41463033977ca3cd35b81d04bae4e6f76bc7a11b714ab21ed87a6d0c4ff" exitCode=0 Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.177544 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" event={"ID":"29b615e2-07e5-4456-93e6-1e2a2c5c8a38","Type":"ContainerDied","Data":"d999b41463033977ca3cd35b81d04bae4e6f76bc7a11b714ab21ed87a6d0c4ff"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.177567 5010 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" event={"ID":"29b615e2-07e5-4456-93e6-1e2a2c5c8a38","Type":"ContainerStarted","Data":"a9377803eac38c9d9037b5b0a8577d9e9fed76a0f50516eb7562147a5504e9f9"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.194849 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerStarted","Data":"5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.194916 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerStarted","Data":"b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.195204 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.195276 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.199172 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"776a1766-4e7d-4ea0-bd5b-18b6b352448a","Type":"ContainerStarted","Data":"76be070eec53259277ed1ae9bfb0c4bc5bd14cf0b5a29ace0f621ed64c42f411"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.199217 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"776a1766-4e7d-4ea0-bd5b-18b6b352448a","Type":"ContainerStarted","Data":"51be9ebe273584a0cde1861b72fd331776c123194b60da0b480c2bacde3385dc"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.202826 5010 generic.go:334] "Generic (PLEG): container finished" podID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerID="1e6912d835f3e6556a1c6e8dab7b684c62516a3785f8ea9638e9484841283b97" exitCode=0 Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.202938 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" event={"ID":"cc975d2c-bffd-4de4-9431-43fbdd64bfd5","Type":"ContainerDied","Data":"1e6912d835f3e6556a1c6e8dab7b684c62516a3785f8ea9638e9484841283b97"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.205990 5010 generic.go:334] "Generic (PLEG): container finished" podID="83c93744-06d7-4cf1-8770-a93ca140fa31" containerID="ee90a8f3c77d0c3f8debda64ee563b363485f98d63480f39084e9ed536cdf5de" exitCode=0 Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.206060 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" event={"ID":"83c93744-06d7-4cf1-8770-a93ca140fa31","Type":"ContainerDied","Data":"ee90a8f3c77d0c3f8debda64ee563b363485f98d63480f39084e9ed536cdf5de"} Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.215813 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=41.428661735 podStartE2EDuration="1m41.215786971s" podCreationTimestamp="2025-11-26 15:45:12 +0000 UTC" firstStartedPulling="2025-11-26 15:45:14.764866206 +0000 UTC m=+1135.555583354" lastFinishedPulling="2025-11-26 15:46:14.551991442 +0000 UTC m=+1195.342708590" observedRunningTime="2025-11-26 15:46:53.208475678 +0000 UTC m=+1233.999192836" watchObservedRunningTime="2025-11-26 15:46:53.215786971 +0000 UTC m=+1234.006504189" Nov 26 
15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.281066 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-vl2vn" podStartSLOduration=11.607109795 podStartE2EDuration="29.281038494s" podCreationTimestamp="2025-11-26 15:46:24 +0000 UTC" firstStartedPulling="2025-11-26 15:46:34.440004065 +0000 UTC m=+1215.230721253" lastFinishedPulling="2025-11-26 15:46:52.113932794 +0000 UTC m=+1232.904649952" observedRunningTime="2025-11-26 15:46:53.237397342 +0000 UTC m=+1234.028114500" watchObservedRunningTime="2025-11-26 15:46:53.281038494 +0000 UTC m=+1234.071755642" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.308061 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=-9223371934.546747 podStartE2EDuration="1m42.308028899s" podCreationTimestamp="2025-11-26 15:45:11 +0000 UTC" firstStartedPulling="2025-11-26 15:45:13.209612319 +0000 UTC m=+1134.000329467" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:46:53.280264045 +0000 UTC m=+1234.070981203" watchObservedRunningTime="2025-11-26 15:46:53.308028899 +0000 UTC m=+1234.098746047" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.313913 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=71.52171359 podStartE2EDuration="1m31.313889116s" podCreationTimestamp="2025-11-26 15:45:22 +0000 UTC" firstStartedPulling="2025-11-26 15:46:13.367947801 +0000 UTC m=+1194.158664949" lastFinishedPulling="2025-11-26 15:46:33.160123327 +0000 UTC m=+1213.950840475" observedRunningTime="2025-11-26 15:46:53.306104261 +0000 UTC m=+1234.096821439" watchObservedRunningTime="2025-11-26 15:46:53.313889116 +0000 UTC m=+1234.104606264" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.362808 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-f7n92" podStartSLOduration=76.314265818 podStartE2EDuration="1m34.362779589s" podCreationTimestamp="2025-11-26 15:45:19 +0000 UTC" firstStartedPulling="2025-11-26 15:46:14.573561245 +0000 UTC m=+1195.364278433" lastFinishedPulling="2025-11-26 15:46:32.622075056 +0000 UTC m=+1213.412792204" observedRunningTime="2025-11-26 15:46:53.330338487 +0000 UTC m=+1234.121055635" watchObservedRunningTime="2025-11-26 15:46:53.362779589 +0000 UTC m=+1234.153496737" Nov 26 15:46:53 crc kubenswrapper[5010]: E1126 15:46:53.582036 5010 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 26 15:46:53 crc kubenswrapper[5010]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/cc975d2c-bffd-4de4-9431-43fbdd64bfd5/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 26 15:46:53 crc kubenswrapper[5010]: > podSandboxID="796457aceba4ce615fb7ac83c416dedeed773e5fc38ccea748b5ad97e3a7f185" Nov 26 15:46:53 crc kubenswrapper[5010]: E1126 15:46:53.582697 5010 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 26 15:46:53 crc kubenswrapper[5010]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts 
--domain-needed --no-resolv --bogus-priv --log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n58bh65dh95hf6h595hf6hf5h59dh6h57dh558h55ch5dbh5f5h565h5f7h9fh76h58ch54dh84h59bh7fh6bh5b9h59h67fh566h56h5f4h554h58fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7d92k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-65c78595c5-npwt2_openstack(cc975d2c-bffd-4de4-9431-43fbdd64bfd5): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/cc975d2c-bffd-4de4-9431-43fbdd64bfd5/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 26 15:46:53 crc kubenswrapper[5010]: > logger="UnhandledError" Nov 26 15:46:53 crc kubenswrapper[5010]: E1126 15:46:53.583816 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/cc975d2c-bffd-4de4-9431-43fbdd64bfd5/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.670030 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.795697 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.795768 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.830492 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-config\") pod \"83c93744-06d7-4cf1-8770-a93ca140fa31\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.830668 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nk9k2\" (UniqueName: \"kubernetes.io/projected/83c93744-06d7-4cf1-8770-a93ca140fa31-kube-api-access-nk9k2\") pod \"83c93744-06d7-4cf1-8770-a93ca140fa31\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.830838 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-dns-svc\") pod \"83c93744-06d7-4cf1-8770-a93ca140fa31\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.830977 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-nb\") pod \"83c93744-06d7-4cf1-8770-a93ca140fa31\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.831947 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-sb\") pod \"83c93744-06d7-4cf1-8770-a93ca140fa31\" (UID: \"83c93744-06d7-4cf1-8770-a93ca140fa31\") " Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.837592 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83c93744-06d7-4cf1-8770-a93ca140fa31-kube-api-access-nk9k2" (OuterVolumeSpecName: "kube-api-access-nk9k2") pod "83c93744-06d7-4cf1-8770-a93ca140fa31" (UID: "83c93744-06d7-4cf1-8770-a93ca140fa31"). InnerVolumeSpecName "kube-api-access-nk9k2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.854494 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "83c93744-06d7-4cf1-8770-a93ca140fa31" (UID: "83c93744-06d7-4cf1-8770-a93ca140fa31"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.854591 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "83c93744-06d7-4cf1-8770-a93ca140fa31" (UID: "83c93744-06d7-4cf1-8770-a93ca140fa31"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.857910 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-config" (OuterVolumeSpecName: "config") pod "83c93744-06d7-4cf1-8770-a93ca140fa31" (UID: "83c93744-06d7-4cf1-8770-a93ca140fa31"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.858787 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "83c93744-06d7-4cf1-8770-a93ca140fa31" (UID: "83c93744-06d7-4cf1-8770-a93ca140fa31"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.935098 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nk9k2\" (UniqueName: \"kubernetes.io/projected/83c93744-06d7-4cf1-8770-a93ca140fa31-kube-api-access-nk9k2\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.935130 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.935145 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.935212 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:53 crc kubenswrapper[5010]: I1126 15:46:53.935245 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c93744-06d7-4cf1-8770-a93ca140fa31-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.019178 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.019241 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.222489 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.223177 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-6pxjv" event={"ID":"83c93744-06d7-4cf1-8770-a93ca140fa31","Type":"ContainerDied","Data":"c5100b45042269ae5e66f18787e74f07143e2234165275cddaf650913b0bd5f1"} Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.223236 5010 scope.go:117] "RemoveContainer" containerID="ee90a8f3c77d0c3f8debda64ee563b363485f98d63480f39084e9ed536cdf5de" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.228925 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5780f988-6f45-4fdb-9a2b-f149c0499552","Type":"ContainerStarted","Data":"8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb"} Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.262335 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" event={"ID":"29b615e2-07e5-4456-93e6-1e2a2c5c8a38","Type":"ContainerStarted","Data":"a60cce7f5232cf4a19c7a095215a9f4426de0e9578f05c344c30cee1c1b6ea05"} Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.262892 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.300722 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-6pxjv"] Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.332411 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-6pxjv"] Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.337941 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=56.003769256 podStartE2EDuration="1m35.337911775s" podCreationTimestamp="2025-11-26 15:45:19 +0000 UTC" firstStartedPulling="2025-11-26 15:46:14.559921052 +0000 UTC m=+1195.350638240" lastFinishedPulling="2025-11-26 15:46:53.894063611 +0000 UTC m=+1234.684780759" observedRunningTime="2025-11-26 15:46:54.294483799 +0000 UTC m=+1235.085200977" watchObservedRunningTime="2025-11-26 15:46:54.337911775 +0000 UTC m=+1235.128628933" Nov 26 15:46:54 crc kubenswrapper[5010]: I1126 15:46:54.352400 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" podStartSLOduration=8.352375147 podStartE2EDuration="8.352375147s" podCreationTimestamp="2025-11-26 15:46:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:46:54.324529651 +0000 UTC m=+1235.115246809" watchObservedRunningTime="2025-11-26 15:46:54.352375147 +0000 UTC m=+1235.143092295" Nov 26 15:46:55 crc kubenswrapper[5010]: I1126 15:46:55.479169 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:46:55 crc kubenswrapper[5010]: E1126 15:46:55.479471 5010 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 15:46:55 crc kubenswrapper[5010]: E1126 15:46:55.479914 5010 projected.go:194] Error preparing data for projected volume etc-swift for pod 
openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 15:46:55 crc kubenswrapper[5010]: E1126 15:46:55.479990 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift podName:1803fc99-2cc8-44e7-8ce5-eac5bc548f88 nodeName:}" failed. No retries permitted until 2025-11-26 15:47:03.479967189 +0000 UTC m=+1244.270684337 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift") pod "swift-storage-0" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88") : configmap "swift-ring-files" not found Nov 26 15:46:55 crc kubenswrapper[5010]: I1126 15:46:55.904232 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83c93744-06d7-4cf1-8770-a93ca140fa31" path="/var/lib/kubelet/pods/83c93744-06d7-4cf1-8770-a93ca140fa31/volumes" Nov 26 15:46:56 crc kubenswrapper[5010]: I1126 15:46:56.221845 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 26 15:46:56 crc kubenswrapper[5010]: I1126 15:46:56.854588 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.222105 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.276132 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.292504 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hdcdl" event={"ID":"260f1345-096d-4d94-901e-943c3d9e4135","Type":"ContainerStarted","Data":"b8ec1a9cdb303364bb02c0d64077536cd7666be13b0d6b8dbdc15f743fa920ab"} Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.295622 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" event={"ID":"cc975d2c-bffd-4de4-9431-43fbdd64bfd5","Type":"ContainerStarted","Data":"e4dfa35226de8f69d686990d31e67ffdb0bc53ac3962c07bf57184830ab65e77"} Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.295951 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.345417 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-hdcdl" podStartSLOduration=5.064155896 podStartE2EDuration="9.34539334s" podCreationTimestamp="2025-11-26 15:46:48 +0000 UTC" firstStartedPulling="2025-11-26 15:46:52.575872571 +0000 UTC m=+1233.366589719" lastFinishedPulling="2025-11-26 15:46:56.857110025 +0000 UTC m=+1237.647827163" observedRunningTime="2025-11-26 15:46:57.335629466 +0000 UTC m=+1238.126346634" watchObservedRunningTime="2025-11-26 15:46:57.34539334 +0000 UTC m=+1238.136110488" Nov 26 15:46:57 crc kubenswrapper[5010]: I1126 15:46:57.360087 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" podStartSLOduration=15.868397181 podStartE2EDuration="33.360061537s" podCreationTimestamp="2025-11-26 15:46:24 +0000 UTC" firstStartedPulling="2025-11-26 15:46:34.452004027 +0000 UTC m=+1215.242721185" lastFinishedPulling="2025-11-26 15:46:51.943668383 +0000 UTC m=+1232.734385541" observedRunningTime="2025-11-26 
15:46:57.357154844 +0000 UTC m=+1238.147872042" watchObservedRunningTime="2025-11-26 15:46:57.360061537 +0000 UTC m=+1238.150778685" Nov 26 15:46:57 crc kubenswrapper[5010]: E1126 15:46:57.752896 5010 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.154:57418->38.102.83.154:42721: write tcp 38.102.83.154:57418->38.102.83.154:42721: write: broken pipe Nov 26 15:46:58 crc kubenswrapper[5010]: I1126 15:46:58.868694 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 26 15:47:00 crc kubenswrapper[5010]: I1126 15:47:00.131906 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 26 15:47:00 crc kubenswrapper[5010]: I1126 15:47:00.236493 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.285365 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.475831 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 15:47:01 crc kubenswrapper[5010]: E1126 15:47:01.476315 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83c93744-06d7-4cf1-8770-a93ca140fa31" containerName="init" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.476345 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="83c93744-06d7-4cf1-8770-a93ca140fa31" containerName="init" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.476567 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="83c93744-06d7-4cf1-8770-a93ca140fa31" containerName="init" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.478083 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.485421 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.485693 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.486287 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-c7vn6" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.488905 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.500295 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.607495 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.607553 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.607897 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-scripts\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.608101 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.608190 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bffsw\" (UniqueName: \"kubernetes.io/projected/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-kube-api-access-bffsw\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.608312 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-config\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.608454 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: 
I1126 15:47:01.711601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-scripts\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.712221 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.712299 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bffsw\" (UniqueName: \"kubernetes.io/projected/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-kube-api-access-bffsw\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.712398 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-config\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.712505 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.712698 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.712833 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.713228 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-config\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.713500 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.713734 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-scripts\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.721250 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.729967 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.731783 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.741998 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bffsw\" (UniqueName: \"kubernetes.io/projected/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-kube-api-access-bffsw\") pod \"ovn-northd-0\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.777997 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.811273 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.871264 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-npwt2"] Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.871593 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerName="dnsmasq-dns" containerID="cri-o://e4dfa35226de8f69d686990d31e67ffdb0bc53ac3962c07bf57184830ab65e77" gracePeriod=10 Nov 26 15:47:01 crc kubenswrapper[5010]: I1126 15:47:01.873430 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.318920 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 15:47:02 crc kubenswrapper[5010]: W1126 15:47:02.323325 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9fea0e4_4e18_4d7e_9af0_fd46b742565c.slice/crio-9cabee4cf956a9f8bba6dd4f186f505519eea586ef206c92ad2e1d18b25f952a WatchSource:0}: Error finding container 9cabee4cf956a9f8bba6dd4f186f505519eea586ef206c92ad2e1d18b25f952a: Status 404 returned error can't find the container with id 9cabee4cf956a9f8bba6dd4f186f505519eea586ef206c92ad2e1d18b25f952a Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.339022 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9fea0e4-4e18-4d7e-9af0-fd46b742565c","Type":"ContainerStarted","Data":"9cabee4cf956a9f8bba6dd4f186f505519eea586ef206c92ad2e1d18b25f952a"} Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.341867 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-65c78595c5-npwt2" event={"ID":"cc975d2c-bffd-4de4-9431-43fbdd64bfd5","Type":"ContainerDied","Data":"e4dfa35226de8f69d686990d31e67ffdb0bc53ac3962c07bf57184830ab65e77"} Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.341897 5010 generic.go:334] "Generic (PLEG): container finished" podID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerID="e4dfa35226de8f69d686990d31e67ffdb0bc53ac3962c07bf57184830ab65e77" exitCode=0 Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.358467 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.529693 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d92k\" (UniqueName: \"kubernetes.io/projected/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-kube-api-access-7d92k\") pod \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.531446 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-ovsdbserver-nb\") pod \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.531782 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-config\") pod \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.531942 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-dns-svc\") pod \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\" (UID: \"cc975d2c-bffd-4de4-9431-43fbdd64bfd5\") " Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.536328 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-kube-api-access-7d92k" (OuterVolumeSpecName: "kube-api-access-7d92k") pod "cc975d2c-bffd-4de4-9431-43fbdd64bfd5" (UID: "cc975d2c-bffd-4de4-9431-43fbdd64bfd5"). InnerVolumeSpecName "kube-api-access-7d92k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.538043 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.538194 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.586313 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cc975d2c-bffd-4de4-9431-43fbdd64bfd5" (UID: "cc975d2c-bffd-4de4-9431-43fbdd64bfd5"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.605036 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-config" (OuterVolumeSpecName: "config") pod "cc975d2c-bffd-4de4-9431-43fbdd64bfd5" (UID: "cc975d2c-bffd-4de4-9431-43fbdd64bfd5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.606267 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cc975d2c-bffd-4de4-9431-43fbdd64bfd5" (UID: "cc975d2c-bffd-4de4-9431-43fbdd64bfd5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.634455 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.634506 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.634523 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d92k\" (UniqueName: \"kubernetes.io/projected/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-kube-api-access-7d92k\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.634543 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cc975d2c-bffd-4de4-9431-43fbdd64bfd5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:02 crc kubenswrapper[5010]: I1126 15:47:02.662998 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.355426 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.355842 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c78595c5-npwt2" event={"ID":"cc975d2c-bffd-4de4-9431-43fbdd64bfd5","Type":"ContainerDied","Data":"796457aceba4ce615fb7ac83c416dedeed773e5fc38ccea748b5ad97e3a7f185"} Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.355894 5010 scope.go:117] "RemoveContainer" containerID="e4dfa35226de8f69d686990d31e67ffdb0bc53ac3962c07bf57184830ab65e77" Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.400265 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-npwt2"] Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.402834 5010 scope.go:117] "RemoveContainer" containerID="1e6912d835f3e6556a1c6e8dab7b684c62516a3785f8ea9638e9484841283b97" Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.406933 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65c78595c5-npwt2"] Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.445430 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.552602 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:47:03 crc kubenswrapper[5010]: E1126 15:47:03.553653 5010 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 26 15:47:03 crc kubenswrapper[5010]: E1126 15:47:03.553688 5010 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 26 15:47:03 crc kubenswrapper[5010]: E1126 15:47:03.553755 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift podName:1803fc99-2cc8-44e7-8ce5-eac5bc548f88 nodeName:}" failed. No retries permitted until 2025-11-26 15:47:19.553738349 +0000 UTC m=+1260.344455497 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift") pod "swift-storage-0" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88") : configmap "swift-ring-files" not found Nov 26 15:47:03 crc kubenswrapper[5010]: I1126 15:47:03.904392 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" path="/var/lib/kubelet/pods/cc975d2c-bffd-4de4-9431-43fbdd64bfd5/volumes" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.369097 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9fea0e4-4e18-4d7e-9af0-fd46b742565c","Type":"ContainerStarted","Data":"3b98cba8078e790765a3a58a436a7c3b361b88b1f2e0cfb60098baee4f4cce2a"} Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.705443 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-ebd8-account-create-update-t7wlc"] Nov 26 15:47:04 crc kubenswrapper[5010]: E1126 15:47:04.705891 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerName="init" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.705914 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerName="init" Nov 26 15:47:04 crc kubenswrapper[5010]: E1126 15:47:04.705936 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerName="dnsmasq-dns" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.705945 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerName="dnsmasq-dns" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.706216 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc975d2c-bffd-4de4-9431-43fbdd64bfd5" containerName="dnsmasq-dns" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.706950 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.712848 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.723905 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ebd8-account-create-update-t7wlc"] Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.764977 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-pnnp6"] Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.767915 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.776382 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-pnnp6"] Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.876680 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d16011c3-075a-4cff-a221-16ed50067a9e-operator-scripts\") pod \"keystone-ebd8-account-create-update-t7wlc\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.876797 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5j77z\" (UniqueName: \"kubernetes.io/projected/d16011c3-075a-4cff-a221-16ed50067a9e-kube-api-access-5j77z\") pod \"keystone-ebd8-account-create-update-t7wlc\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.876832 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-operator-scripts\") pod \"keystone-db-create-pnnp6\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.876990 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n8c4\" (UniqueName: \"kubernetes.io/projected/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-kube-api-access-8n8c4\") pod \"keystone-db-create-pnnp6\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.979534 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5j77z\" (UniqueName: \"kubernetes.io/projected/d16011c3-075a-4cff-a221-16ed50067a9e-kube-api-access-5j77z\") pod \"keystone-ebd8-account-create-update-t7wlc\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.979623 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-operator-scripts\") pod \"keystone-db-create-pnnp6\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.979696 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n8c4\" (UniqueName: \"kubernetes.io/projected/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-kube-api-access-8n8c4\") pod \"keystone-db-create-pnnp6\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.979870 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d16011c3-075a-4cff-a221-16ed50067a9e-operator-scripts\") pod \"keystone-ebd8-account-create-update-t7wlc\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " 
pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.980938 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-operator-scripts\") pod \"keystone-db-create-pnnp6\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:04 crc kubenswrapper[5010]: I1126 15:47:04.981021 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d16011c3-075a-4cff-a221-16ed50067a9e-operator-scripts\") pod \"keystone-ebd8-account-create-update-t7wlc\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.010109 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n8c4\" (UniqueName: \"kubernetes.io/projected/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-kube-api-access-8n8c4\") pod \"keystone-db-create-pnnp6\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.013096 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-86krh"] Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.014616 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.015907 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5j77z\" (UniqueName: \"kubernetes.io/projected/d16011c3-075a-4cff-a221-16ed50067a9e-kube-api-access-5j77z\") pod \"keystone-ebd8-account-create-update-t7wlc\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.025449 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.029842 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-86krh"] Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.086349 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.091667 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-0cf4-account-create-update-8zr8q"] Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.100811 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0cf4-account-create-update-8zr8q"] Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.101109 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.104132 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.182614 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ps5j\" (UniqueName: \"kubernetes.io/projected/e962487c-09d8-4b78-aec6-6ed212c3bd75-kube-api-access-4ps5j\") pod \"placement-db-create-86krh\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.182651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae8d19c8-ae98-467d-b061-856521d7029d-operator-scripts\") pod \"placement-0cf4-account-create-update-8zr8q\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.182758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e962487c-09d8-4b78-aec6-6ed212c3bd75-operator-scripts\") pod \"placement-db-create-86krh\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.182798 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn4kb\" (UniqueName: \"kubernetes.io/projected/ae8d19c8-ae98-467d-b061-856521d7029d-kube-api-access-nn4kb\") pod \"placement-0cf4-account-create-update-8zr8q\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.284101 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn4kb\" (UniqueName: \"kubernetes.io/projected/ae8d19c8-ae98-467d-b061-856521d7029d-kube-api-access-nn4kb\") pod \"placement-0cf4-account-create-update-8zr8q\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.284227 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ps5j\" (UniqueName: \"kubernetes.io/projected/e962487c-09d8-4b78-aec6-6ed212c3bd75-kube-api-access-4ps5j\") pod \"placement-db-create-86krh\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.284260 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae8d19c8-ae98-467d-b061-856521d7029d-operator-scripts\") pod \"placement-0cf4-account-create-update-8zr8q\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.284374 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e962487c-09d8-4b78-aec6-6ed212c3bd75-operator-scripts\") pod \"placement-db-create-86krh\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " 
pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.285263 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae8d19c8-ae98-467d-b061-856521d7029d-operator-scripts\") pod \"placement-0cf4-account-create-update-8zr8q\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.285405 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e962487c-09d8-4b78-aec6-6ed212c3bd75-operator-scripts\") pod \"placement-db-create-86krh\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.311182 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ps5j\" (UniqueName: \"kubernetes.io/projected/e962487c-09d8-4b78-aec6-6ed212c3bd75-kube-api-access-4ps5j\") pod \"placement-db-create-86krh\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.311280 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn4kb\" (UniqueName: \"kubernetes.io/projected/ae8d19c8-ae98-467d-b061-856521d7029d-kube-api-access-nn4kb\") pod \"placement-0cf4-account-create-update-8zr8q\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.421384 5010 generic.go:334] "Generic (PLEG): container finished" podID="260f1345-096d-4d94-901e-943c3d9e4135" containerID="b8ec1a9cdb303364bb02c0d64077536cd7666be13b0d6b8dbdc15f743fa920ab" exitCode=0 Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.421500 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hdcdl" event={"ID":"260f1345-096d-4d94-901e-943c3d9e4135","Type":"ContainerDied","Data":"b8ec1a9cdb303364bb02c0d64077536cd7666be13b0d6b8dbdc15f743fa920ab"} Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.428057 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9fea0e4-4e18-4d7e-9af0-fd46b742565c","Type":"ContainerStarted","Data":"2aa7f2cde724ae9be71611e2947e9786538808ad37c2bc8674777309a8ce98ab"} Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.429293 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.477176 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-86krh" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.483166 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.495802 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.906386001 podStartE2EDuration="4.495776087s" podCreationTimestamp="2025-11-26 15:47:01 +0000 UTC" firstStartedPulling="2025-11-26 15:47:02.326134235 +0000 UTC m=+1243.116851383" lastFinishedPulling="2025-11-26 15:47:03.915524321 +0000 UTC m=+1244.706241469" observedRunningTime="2025-11-26 15:47:05.480198407 +0000 UTC m=+1246.270915555" watchObservedRunningTime="2025-11-26 15:47:05.495776087 +0000 UTC m=+1246.286493265" Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.566840 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ebd8-account-create-update-t7wlc"] Nov 26 15:47:05 crc kubenswrapper[5010]: I1126 15:47:05.612505 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-pnnp6"] Nov 26 15:47:05 crc kubenswrapper[5010]: W1126 15:47:05.629475 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55bd51a9_df41_4ab4_be2b_43dd4d776bf5.slice/crio-25f846b0779a16db21828de378c1da6c77d02af7762a2c62c4fc061015fbc0c1 WatchSource:0}: Error finding container 25f846b0779a16db21828de378c1da6c77d02af7762a2c62c4fc061015fbc0c1: Status 404 returned error can't find the container with id 25f846b0779a16db21828de378c1da6c77d02af7762a2c62c4fc061015fbc0c1 Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.128567 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-86krh"] Nov 26 15:47:06 crc kubenswrapper[5010]: W1126 15:47:06.149164 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode962487c_09d8_4b78_aec6_6ed212c3bd75.slice/crio-fad992e92048eae0f436ebcffc98a765d0362e26eb6e91a8e1ef83161381d9dc WatchSource:0}: Error finding container fad992e92048eae0f436ebcffc98a765d0362e26eb6e91a8e1ef83161381d9dc: Status 404 returned error can't find the container with id fad992e92048eae0f436ebcffc98a765d0362e26eb6e91a8e1ef83161381d9dc Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.183891 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-0cf4-account-create-update-8zr8q"] Nov 26 15:47:06 crc kubenswrapper[5010]: W1126 15:47:06.187259 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae8d19c8_ae98_467d_b061_856521d7029d.slice/crio-a8b9952af392d8f7a8b4855ecfff0cf9456e692acf3853fd1f769781d593b7b4 WatchSource:0}: Error finding container a8b9952af392d8f7a8b4855ecfff0cf9456e692acf3853fd1f769781d593b7b4: Status 404 returned error can't find the container with id a8b9952af392d8f7a8b4855ecfff0cf9456e692acf3853fd1f769781d593b7b4 Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.440151 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65356e91-f417-4d3c-8298-cd16cd182fea","Type":"ContainerStarted","Data":"974eb392789b3575b6ab91660a9720a82245412b282bb7c530b30d8feee0d0df"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.440842 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.442435 5010 generic.go:334] 
"Generic (PLEG): container finished" podID="55bd51a9-df41-4ab4-be2b-43dd4d776bf5" containerID="6eb836eb928608159721296c8ca9e0e1446f9f961fc14b0267fa617ac7a36cfc" exitCode=0 Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.442539 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pnnp6" event={"ID":"55bd51a9-df41-4ab4-be2b-43dd4d776bf5","Type":"ContainerDied","Data":"6eb836eb928608159721296c8ca9e0e1446f9f961fc14b0267fa617ac7a36cfc"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.442586 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pnnp6" event={"ID":"55bd51a9-df41-4ab4-be2b-43dd4d776bf5","Type":"ContainerStarted","Data":"25f846b0779a16db21828de378c1da6c77d02af7762a2c62c4fc061015fbc0c1"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.445265 5010 generic.go:334] "Generic (PLEG): container finished" podID="d16011c3-075a-4cff-a221-16ed50067a9e" containerID="237dca687157c8a51ce31a5eeb1e512bb81f5f3d888205c08f85c049cb9d9522" exitCode=0 Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.445338 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ebd8-account-create-update-t7wlc" event={"ID":"d16011c3-075a-4cff-a221-16ed50067a9e","Type":"ContainerDied","Data":"237dca687157c8a51ce31a5eeb1e512bb81f5f3d888205c08f85c049cb9d9522"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.445359 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ebd8-account-create-update-t7wlc" event={"ID":"d16011c3-075a-4cff-a221-16ed50067a9e","Type":"ContainerStarted","Data":"aaaaf3b81a1d2e83144098609bc017bd957cfcdadccbe95e5cbd6cdbb9fe0a0d"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.446572 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0cf4-account-create-update-8zr8q" event={"ID":"ae8d19c8-ae98-467d-b061-856521d7029d","Type":"ContainerStarted","Data":"a8b9952af392d8f7a8b4855ecfff0cf9456e692acf3853fd1f769781d593b7b4"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.447821 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-86krh" event={"ID":"e962487c-09d8-4b78-aec6-6ed212c3bd75","Type":"ContainerStarted","Data":"fad992e92048eae0f436ebcffc98a765d0362e26eb6e91a8e1ef83161381d9dc"} Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.469768 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=59.19646992 podStartE2EDuration="1m51.469738255s" podCreationTimestamp="2025-11-26 15:45:15 +0000 UTC" firstStartedPulling="2025-11-26 15:46:13.366962006 +0000 UTC m=+1194.157679154" lastFinishedPulling="2025-11-26 15:47:05.640230341 +0000 UTC m=+1246.430947489" observedRunningTime="2025-11-26 15:47:06.465578801 +0000 UTC m=+1247.256295959" watchObservedRunningTime="2025-11-26 15:47:06.469738255 +0000 UTC m=+1247.260455433" Nov 26 15:47:06 crc kubenswrapper[5010]: I1126 15:47:06.919149 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033344 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnhb4\" (UniqueName: \"kubernetes.io/projected/260f1345-096d-4d94-901e-943c3d9e4135-kube-api-access-nnhb4\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033420 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-swiftconf\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033468 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-dispersionconf\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033572 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-scripts\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033629 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-ring-data-devices\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033698 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-combined-ca-bundle\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.033767 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/260f1345-096d-4d94-901e-943c3d9e4135-etc-swift\") pod \"260f1345-096d-4d94-901e-943c3d9e4135\" (UID: \"260f1345-096d-4d94-901e-943c3d9e4135\") " Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.034428 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.034797 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/260f1345-096d-4d94-901e-943c3d9e4135-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.035696 5010 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.035753 5010 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/260f1345-096d-4d94-901e-943c3d9e4135-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.039616 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/260f1345-096d-4d94-901e-943c3d9e4135-kube-api-access-nnhb4" (OuterVolumeSpecName: "kube-api-access-nnhb4") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "kube-api-access-nnhb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.044733 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.057124 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.060705 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.061631 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-scripts" (OuterVolumeSpecName: "scripts") pod "260f1345-096d-4d94-901e-943c3d9e4135" (UID: "260f1345-096d-4d94-901e-943c3d9e4135"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.138218 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/260f1345-096d-4d94-901e-943c3d9e4135-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.138287 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.138311 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnhb4\" (UniqueName: \"kubernetes.io/projected/260f1345-096d-4d94-901e-943c3d9e4135-kube-api-access-nnhb4\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.138333 5010 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.138353 5010 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/260f1345-096d-4d94-901e-943c3d9e4135-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.457118 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-hdcdl" event={"ID":"260f1345-096d-4d94-901e-943c3d9e4135","Type":"ContainerDied","Data":"65707cacee357a5d112c160838e3b4aafc3a7b6f8b73c476b42bbfea7ad41e6c"} Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.457164 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="65707cacee357a5d112c160838e3b4aafc3a7b6f8b73c476b42bbfea7ad41e6c" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.457252 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-hdcdl" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.458947 5010 generic.go:334] "Generic (PLEG): container finished" podID="ae8d19c8-ae98-467d-b061-856521d7029d" containerID="dc1d1e75ba8a78442646d771e9dd84547cd380c72a7be91078c4b34fb518725a" exitCode=0 Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.459150 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0cf4-account-create-update-8zr8q" event={"ID":"ae8d19c8-ae98-467d-b061-856521d7029d","Type":"ContainerDied","Data":"dc1d1e75ba8a78442646d771e9dd84547cd380c72a7be91078c4b34fb518725a"} Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.460915 5010 generic.go:334] "Generic (PLEG): container finished" podID="e962487c-09d8-4b78-aec6-6ed212c3bd75" containerID="6c4760ad1a714105713a33fea21344eef7544128a9495f5d5c75c58396e438da" exitCode=0 Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.461006 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-86krh" event={"ID":"e962487c-09d8-4b78-aec6-6ed212c3bd75","Type":"ContainerDied","Data":"6c4760ad1a714105713a33fea21344eef7544128a9495f5d5c75c58396e438da"} Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.968454 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:07 crc kubenswrapper[5010]: I1126 15:47:07.976017 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.068536 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d16011c3-075a-4cff-a221-16ed50067a9e-operator-scripts\") pod \"d16011c3-075a-4cff-a221-16ed50067a9e\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.068789 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5j77z\" (UniqueName: \"kubernetes.io/projected/d16011c3-075a-4cff-a221-16ed50067a9e-kube-api-access-5j77z\") pod \"d16011c3-075a-4cff-a221-16ed50067a9e\" (UID: \"d16011c3-075a-4cff-a221-16ed50067a9e\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.070390 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d16011c3-075a-4cff-a221-16ed50067a9e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d16011c3-075a-4cff-a221-16ed50067a9e" (UID: "d16011c3-075a-4cff-a221-16ed50067a9e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.074910 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d16011c3-075a-4cff-a221-16ed50067a9e-kube-api-access-5j77z" (OuterVolumeSpecName: "kube-api-access-5j77z") pod "d16011c3-075a-4cff-a221-16ed50067a9e" (UID: "d16011c3-075a-4cff-a221-16ed50067a9e"). InnerVolumeSpecName "kube-api-access-5j77z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.170442 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-operator-scripts\") pod \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.170561 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n8c4\" (UniqueName: \"kubernetes.io/projected/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-kube-api-access-8n8c4\") pod \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\" (UID: \"55bd51a9-df41-4ab4-be2b-43dd4d776bf5\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.171018 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5j77z\" (UniqueName: \"kubernetes.io/projected/d16011c3-075a-4cff-a221-16ed50067a9e-kube-api-access-5j77z\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.171047 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d16011c3-075a-4cff-a221-16ed50067a9e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.171894 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "55bd51a9-df41-4ab4-be2b-43dd4d776bf5" (UID: "55bd51a9-df41-4ab4-be2b-43dd4d776bf5"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.174453 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-kube-api-access-8n8c4" (OuterVolumeSpecName: "kube-api-access-8n8c4") pod "55bd51a9-df41-4ab4-be2b-43dd4d776bf5" (UID: "55bd51a9-df41-4ab4-be2b-43dd4d776bf5"). InnerVolumeSpecName "kube-api-access-8n8c4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.273121 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n8c4\" (UniqueName: \"kubernetes.io/projected/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-kube-api-access-8n8c4\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.273159 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/55bd51a9-df41-4ab4-be2b-43dd4d776bf5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.471117 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-pnnp6" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.471131 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-pnnp6" event={"ID":"55bd51a9-df41-4ab4-be2b-43dd4d776bf5","Type":"ContainerDied","Data":"25f846b0779a16db21828de378c1da6c77d02af7762a2c62c4fc061015fbc0c1"} Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.471211 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25f846b0779a16db21828de378c1da6c77d02af7762a2c62c4fc061015fbc0c1" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.472564 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ebd8-account-create-update-t7wlc" event={"ID":"d16011c3-075a-4cff-a221-16ed50067a9e","Type":"ContainerDied","Data":"aaaaf3b81a1d2e83144098609bc017bd957cfcdadccbe95e5cbd6cdbb9fe0a0d"} Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.472606 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aaaaf3b81a1d2e83144098609bc017bd957cfcdadccbe95e5cbd6cdbb9fe0a0d" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.472857 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ebd8-account-create-update-t7wlc" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.843676 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.850551 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-86krh" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.890364 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae8d19c8-ae98-467d-b061-856521d7029d-operator-scripts\") pod \"ae8d19c8-ae98-467d-b061-856521d7029d\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.890448 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4ps5j\" (UniqueName: \"kubernetes.io/projected/e962487c-09d8-4b78-aec6-6ed212c3bd75-kube-api-access-4ps5j\") pod \"e962487c-09d8-4b78-aec6-6ed212c3bd75\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.890469 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn4kb\" (UniqueName: \"kubernetes.io/projected/ae8d19c8-ae98-467d-b061-856521d7029d-kube-api-access-nn4kb\") pod \"ae8d19c8-ae98-467d-b061-856521d7029d\" (UID: \"ae8d19c8-ae98-467d-b061-856521d7029d\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.890497 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e962487c-09d8-4b78-aec6-6ed212c3bd75-operator-scripts\") pod \"e962487c-09d8-4b78-aec6-6ed212c3bd75\" (UID: \"e962487c-09d8-4b78-aec6-6ed212c3bd75\") " Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.891145 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae8d19c8-ae98-467d-b061-856521d7029d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ae8d19c8-ae98-467d-b061-856521d7029d" (UID: "ae8d19c8-ae98-467d-b061-856521d7029d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.891249 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e962487c-09d8-4b78-aec6-6ed212c3bd75-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e962487c-09d8-4b78-aec6-6ed212c3bd75" (UID: "e962487c-09d8-4b78-aec6-6ed212c3bd75"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.896615 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e962487c-09d8-4b78-aec6-6ed212c3bd75-kube-api-access-4ps5j" (OuterVolumeSpecName: "kube-api-access-4ps5j") pod "e962487c-09d8-4b78-aec6-6ed212c3bd75" (UID: "e962487c-09d8-4b78-aec6-6ed212c3bd75"). InnerVolumeSpecName "kube-api-access-4ps5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.896886 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae8d19c8-ae98-467d-b061-856521d7029d-kube-api-access-nn4kb" (OuterVolumeSpecName: "kube-api-access-nn4kb") pod "ae8d19c8-ae98-467d-b061-856521d7029d" (UID: "ae8d19c8-ae98-467d-b061-856521d7029d"). InnerVolumeSpecName "kube-api-access-nn4kb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.993101 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae8d19c8-ae98-467d-b061-856521d7029d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.993139 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4ps5j\" (UniqueName: \"kubernetes.io/projected/e962487c-09d8-4b78-aec6-6ed212c3bd75-kube-api-access-4ps5j\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.993152 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn4kb\" (UniqueName: \"kubernetes.io/projected/ae8d19c8-ae98-467d-b061-856521d7029d-kube-api-access-nn4kb\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:08 crc kubenswrapper[5010]: I1126 15:47:08.993161 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e962487c-09d8-4b78-aec6-6ed212c3bd75-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.485895 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-0cf4-account-create-update-8zr8q" event={"ID":"ae8d19c8-ae98-467d-b061-856521d7029d","Type":"ContainerDied","Data":"a8b9952af392d8f7a8b4855ecfff0cf9456e692acf3853fd1f769781d593b7b4"} Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.485950 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8b9952af392d8f7a8b4855ecfff0cf9456e692acf3853fd1f769781d593b7b4" Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.485954 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-0cf4-account-create-update-8zr8q" Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.488111 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerID="ab6cade5267022ce5c3a9112e0b1e51b93929e7dcbe177fc49bab18f72aaf1a2" exitCode=0 Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.488160 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25","Type":"ContainerDied","Data":"ab6cade5267022ce5c3a9112e0b1e51b93929e7dcbe177fc49bab18f72aaf1a2"} Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.494511 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-86krh" event={"ID":"e962487c-09d8-4b78-aec6-6ed212c3bd75","Type":"ContainerDied","Data":"fad992e92048eae0f436ebcffc98a765d0362e26eb6e91a8e1ef83161381d9dc"} Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.494569 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fad992e92048eae0f436ebcffc98a765d0362e26eb6e91a8e1ef83161381d9dc" Nov 26 15:47:09 crc kubenswrapper[5010]: I1126 15:47:09.494588 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-86krh" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.332663 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-99648"] Nov 26 15:47:10 crc kubenswrapper[5010]: E1126 15:47:10.333698 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16011c3-075a-4cff-a221-16ed50067a9e" containerName="mariadb-account-create-update" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.333738 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16011c3-075a-4cff-a221-16ed50067a9e" containerName="mariadb-account-create-update" Nov 26 15:47:10 crc kubenswrapper[5010]: E1126 15:47:10.333750 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55bd51a9-df41-4ab4-be2b-43dd4d776bf5" containerName="mariadb-database-create" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.333758 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="55bd51a9-df41-4ab4-be2b-43dd4d776bf5" containerName="mariadb-database-create" Nov 26 15:47:10 crc kubenswrapper[5010]: E1126 15:47:10.333783 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae8d19c8-ae98-467d-b061-856521d7029d" containerName="mariadb-account-create-update" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.333796 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae8d19c8-ae98-467d-b061-856521d7029d" containerName="mariadb-account-create-update" Nov 26 15:47:10 crc kubenswrapper[5010]: E1126 15:47:10.333808 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="260f1345-096d-4d94-901e-943c3d9e4135" containerName="swift-ring-rebalance" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.333817 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="260f1345-096d-4d94-901e-943c3d9e4135" containerName="swift-ring-rebalance" Nov 26 15:47:10 crc kubenswrapper[5010]: E1126 15:47:10.333832 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e962487c-09d8-4b78-aec6-6ed212c3bd75" containerName="mariadb-database-create" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.333844 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e962487c-09d8-4b78-aec6-6ed212c3bd75" containerName="mariadb-database-create" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.334064 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d16011c3-075a-4cff-a221-16ed50067a9e" containerName="mariadb-account-create-update" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.334089 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae8d19c8-ae98-467d-b061-856521d7029d" containerName="mariadb-account-create-update" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.334101 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e962487c-09d8-4b78-aec6-6ed212c3bd75" containerName="mariadb-database-create" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.334117 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="55bd51a9-df41-4ab4-be2b-43dd4d776bf5" containerName="mariadb-database-create" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.334127 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="260f1345-096d-4d94-901e-943c3d9e4135" containerName="swift-ring-rebalance" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.334945 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.344989 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-99648"] Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.416201 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" probeResult="failure" output=< Nov 26 15:47:10 crc kubenswrapper[5010]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 15:47:10 crc kubenswrapper[5010]: > Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.439962 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-c7b0-account-create-update-z6mg5"] Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.441597 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.447748 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.451162 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c7b0-account-create-update-z6mg5"] Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.505131 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25","Type":"ContainerStarted","Data":"d3ae680aa34c0a6c9f874b61e0efe2655d40cee16f8635aa026abbab0b4ef8b8"} Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.506314 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.522963 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csmjr\" (UniqueName: \"kubernetes.io/projected/a54e75fa-7b8b-4159-840f-983ec1a40e0d-kube-api-access-csmjr\") pod \"glance-db-create-99648\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.523077 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54e75fa-7b8b-4159-840f-983ec1a40e0d-operator-scripts\") pod \"glance-db-create-99648\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.531132 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=40.121788516 podStartE2EDuration="2m0.531109357s" podCreationTimestamp="2025-11-26 15:45:10 +0000 UTC" firstStartedPulling="2025-11-26 15:45:12.488373892 +0000 UTC m=+1133.279091040" lastFinishedPulling="2025-11-26 15:46:32.897694733 +0000 UTC m=+1213.688411881" observedRunningTime="2025-11-26 15:47:10.530467161 +0000 UTC m=+1251.321184329" watchObservedRunningTime="2025-11-26 15:47:10.531109357 +0000 UTC m=+1251.321826525" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.624540 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csmjr\" (UniqueName: \"kubernetes.io/projected/a54e75fa-7b8b-4159-840f-983ec1a40e0d-kube-api-access-csmjr\") pod 
\"glance-db-create-99648\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.624651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqsr9\" (UniqueName: \"kubernetes.io/projected/9d447aec-9b58-4184-8f6f-2b10d849d8c0-kube-api-access-zqsr9\") pod \"glance-c7b0-account-create-update-z6mg5\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.624747 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54e75fa-7b8b-4159-840f-983ec1a40e0d-operator-scripts\") pod \"glance-db-create-99648\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.624916 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d447aec-9b58-4184-8f6f-2b10d849d8c0-operator-scripts\") pod \"glance-c7b0-account-create-update-z6mg5\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.628051 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54e75fa-7b8b-4159-840f-983ec1a40e0d-operator-scripts\") pod \"glance-db-create-99648\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.647806 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csmjr\" (UniqueName: \"kubernetes.io/projected/a54e75fa-7b8b-4159-840f-983ec1a40e0d-kube-api-access-csmjr\") pod \"glance-db-create-99648\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.668040 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-99648" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.726795 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d447aec-9b58-4184-8f6f-2b10d849d8c0-operator-scripts\") pod \"glance-c7b0-account-create-update-z6mg5\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.726919 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqsr9\" (UniqueName: \"kubernetes.io/projected/9d447aec-9b58-4184-8f6f-2b10d849d8c0-kube-api-access-zqsr9\") pod \"glance-c7b0-account-create-update-z6mg5\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.727960 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d447aec-9b58-4184-8f6f-2b10d849d8c0-operator-scripts\") pod \"glance-c7b0-account-create-update-z6mg5\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.756597 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqsr9\" (UniqueName: \"kubernetes.io/projected/9d447aec-9b58-4184-8f6f-2b10d849d8c0-kube-api-access-zqsr9\") pod \"glance-c7b0-account-create-update-z6mg5\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:10 crc kubenswrapper[5010]: I1126 15:47:10.761404 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.179966 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-99648"] Nov 26 15:47:11 crc kubenswrapper[5010]: W1126 15:47:11.188152 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda54e75fa_7b8b_4159_840f_983ec1a40e0d.slice/crio-eeee3e04928b9f52ccab1b0af4c979a0dba374d44ac4e757372bd492c5ab9796 WatchSource:0}: Error finding container eeee3e04928b9f52ccab1b0af4c979a0dba374d44ac4e757372bd492c5ab9796: Status 404 returned error can't find the container with id eeee3e04928b9f52ccab1b0af4c979a0dba374d44ac4e757372bd492c5ab9796 Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.343319 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-c7b0-account-create-update-z6mg5"] Nov 26 15:47:11 crc kubenswrapper[5010]: W1126 15:47:11.351552 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d447aec_9b58_4184_8f6f_2b10d849d8c0.slice/crio-66b1e244462bff64c0dd101d1f423a3c168c08d15d8538ddf24c1f6df47a1fc6 WatchSource:0}: Error finding container 66b1e244462bff64c0dd101d1f423a3c168c08d15d8538ddf24c1f6df47a1fc6: Status 404 returned error can't find the container with id 66b1e244462bff64c0dd101d1f423a3c168c08d15d8538ddf24c1f6df47a1fc6 Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.422552 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.422617 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.532042 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c7b0-account-create-update-z6mg5" event={"ID":"9d447aec-9b58-4184-8f6f-2b10d849d8c0","Type":"ContainerStarted","Data":"66b1e244462bff64c0dd101d1f423a3c168c08d15d8538ddf24c1f6df47a1fc6"} Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.533775 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-99648" event={"ID":"a54e75fa-7b8b-4159-840f-983ec1a40e0d","Type":"ContainerStarted","Data":"e67acf107946bb91553ca398217826e217c2c0fbd53a70c45a76896f6169a43f"} Nov 26 15:47:11 crc kubenswrapper[5010]: I1126 15:47:11.533927 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-99648" event={"ID":"a54e75fa-7b8b-4159-840f-983ec1a40e0d","Type":"ContainerStarted","Data":"eeee3e04928b9f52ccab1b0af4c979a0dba374d44ac4e757372bd492c5ab9796"} Nov 26 15:47:12 crc kubenswrapper[5010]: I1126 15:47:12.550669 5010 generic.go:334] "Generic (PLEG): container finished" podID="9940cbe6-c323-4320-9e45-463e5c023156" containerID="fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd" exitCode=0 Nov 26 15:47:12 crc kubenswrapper[5010]: I1126 15:47:12.550787 5010 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9940cbe6-c323-4320-9e45-463e5c023156","Type":"ContainerDied","Data":"fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd"} Nov 26 15:47:12 crc kubenswrapper[5010]: I1126 15:47:12.560880 5010 generic.go:334] "Generic (PLEG): container finished" podID="a54e75fa-7b8b-4159-840f-983ec1a40e0d" containerID="e67acf107946bb91553ca398217826e217c2c0fbd53a70c45a76896f6169a43f" exitCode=0 Nov 26 15:47:12 crc kubenswrapper[5010]: I1126 15:47:12.560964 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-99648" event={"ID":"a54e75fa-7b8b-4159-840f-983ec1a40e0d","Type":"ContainerDied","Data":"e67acf107946bb91553ca398217826e217c2c0fbd53a70c45a76896f6169a43f"} Nov 26 15:47:12 crc kubenswrapper[5010]: I1126 15:47:12.562875 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c7b0-account-create-update-z6mg5" event={"ID":"9d447aec-9b58-4184-8f6f-2b10d849d8c0","Type":"ContainerStarted","Data":"16e4286df9dc4da934acb63de73bc22d543350bd8f03c6db3553cb0bf829316f"} Nov 26 15:47:12 crc kubenswrapper[5010]: I1126 15:47:12.629629 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-c7b0-account-create-update-z6mg5" podStartSLOduration=2.6296051990000002 podStartE2EDuration="2.629605199s" podCreationTimestamp="2025-11-26 15:47:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:47:12.62246005 +0000 UTC m=+1253.413177218" watchObservedRunningTime="2025-11-26 15:47:12.629605199 +0000 UTC m=+1253.420322347" Nov 26 15:47:13 crc kubenswrapper[5010]: I1126 15:47:13.573135 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9940cbe6-c323-4320-9e45-463e5c023156","Type":"ContainerStarted","Data":"e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6"} Nov 26 15:47:13 crc kubenswrapper[5010]: I1126 15:47:13.576539 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 15:47:13 crc kubenswrapper[5010]: I1126 15:47:13.580063 5010 generic.go:334] "Generic (PLEG): container finished" podID="9d447aec-9b58-4184-8f6f-2b10d849d8c0" containerID="16e4286df9dc4da934acb63de73bc22d543350bd8f03c6db3553cb0bf829316f" exitCode=0 Nov 26 15:47:13 crc kubenswrapper[5010]: I1126 15:47:13.580154 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c7b0-account-create-update-z6mg5" event={"ID":"9d447aec-9b58-4184-8f6f-2b10d849d8c0","Type":"ContainerDied","Data":"16e4286df9dc4da934acb63de73bc22d543350bd8f03c6db3553cb0bf829316f"} Nov 26 15:47:13 crc kubenswrapper[5010]: I1126 15:47:13.609464 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=43.642713945 podStartE2EDuration="2m4.609437015s" podCreationTimestamp="2025-11-26 15:45:09 +0000 UTC" firstStartedPulling="2025-11-26 15:45:11.958023855 +0000 UTC m=+1132.748741003" lastFinishedPulling="2025-11-26 15:46:32.924746925 +0000 UTC m=+1213.715464073" observedRunningTime="2025-11-26 15:47:13.602951362 +0000 UTC m=+1254.393668510" watchObservedRunningTime="2025-11-26 15:47:13.609437015 +0000 UTC m=+1254.400154183" Nov 26 15:47:13 crc kubenswrapper[5010]: I1126 15:47:13.926532 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-99648" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.089828 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54e75fa-7b8b-4159-840f-983ec1a40e0d-operator-scripts\") pod \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.090132 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csmjr\" (UniqueName: \"kubernetes.io/projected/a54e75fa-7b8b-4159-840f-983ec1a40e0d-kube-api-access-csmjr\") pod \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\" (UID: \"a54e75fa-7b8b-4159-840f-983ec1a40e0d\") " Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.090502 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a54e75fa-7b8b-4159-840f-983ec1a40e0d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a54e75fa-7b8b-4159-840f-983ec1a40e0d" (UID: "a54e75fa-7b8b-4159-840f-983ec1a40e0d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.090850 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a54e75fa-7b8b-4159-840f-983ec1a40e0d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.134043 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a54e75fa-7b8b-4159-840f-983ec1a40e0d-kube-api-access-csmjr" (OuterVolumeSpecName: "kube-api-access-csmjr") pod "a54e75fa-7b8b-4159-840f-983ec1a40e0d" (UID: "a54e75fa-7b8b-4159-840f-983ec1a40e0d"). InnerVolumeSpecName "kube-api-access-csmjr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.192297 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csmjr\" (UniqueName: \"kubernetes.io/projected/a54e75fa-7b8b-4159-840f-983ec1a40e0d-kube-api-access-csmjr\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.592302 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-99648" event={"ID":"a54e75fa-7b8b-4159-840f-983ec1a40e0d","Type":"ContainerDied","Data":"eeee3e04928b9f52ccab1b0af4c979a0dba374d44ac4e757372bd492c5ab9796"} Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.592368 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eeee3e04928b9f52ccab1b0af4c979a0dba374d44ac4e757372bd492c5ab9796" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.592494 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-99648" Nov 26 15:47:14 crc kubenswrapper[5010]: I1126 15:47:14.975420 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.113726 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d447aec-9b58-4184-8f6f-2b10d849d8c0-operator-scripts\") pod \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.113882 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqsr9\" (UniqueName: \"kubernetes.io/projected/9d447aec-9b58-4184-8f6f-2b10d849d8c0-kube-api-access-zqsr9\") pod \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\" (UID: \"9d447aec-9b58-4184-8f6f-2b10d849d8c0\") " Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.114828 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d447aec-9b58-4184-8f6f-2b10d849d8c0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9d447aec-9b58-4184-8f6f-2b10d849d8c0" (UID: "9d447aec-9b58-4184-8f6f-2b10d849d8c0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.121216 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d447aec-9b58-4184-8f6f-2b10d849d8c0-kube-api-access-zqsr9" (OuterVolumeSpecName: "kube-api-access-zqsr9") pod "9d447aec-9b58-4184-8f6f-2b10d849d8c0" (UID: "9d447aec-9b58-4184-8f6f-2b10d849d8c0"). InnerVolumeSpecName "kube-api-access-zqsr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.215880 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9d447aec-9b58-4184-8f6f-2b10d849d8c0-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.215924 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqsr9\" (UniqueName: \"kubernetes.io/projected/9d447aec-9b58-4184-8f6f-2b10d849d8c0-kube-api-access-zqsr9\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.397812 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" probeResult="failure" output=< Nov 26 15:47:15 crc kubenswrapper[5010]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 15:47:15 crc kubenswrapper[5010]: > Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.619519 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-c7b0-account-create-update-z6mg5" event={"ID":"9d447aec-9b58-4184-8f6f-2b10d849d8c0","Type":"ContainerDied","Data":"66b1e244462bff64c0dd101d1f423a3c168c08d15d8538ddf24c1f6df47a1fc6"} Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.619583 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66b1e244462bff64c0dd101d1f423a3c168c08d15d8538ddf24c1f6df47a1fc6" Nov 26 15:47:15 crc kubenswrapper[5010]: I1126 15:47:15.619628 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-c7b0-account-create-update-z6mg5" Nov 26 15:47:16 crc kubenswrapper[5010]: I1126 15:47:16.636490 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 15:47:16 crc kubenswrapper[5010]: I1126 15:47:16.878236 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 15:47:19 crc kubenswrapper[5010]: I1126 15:47:19.605632 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:47:19 crc kubenswrapper[5010]: I1126 15:47:19.616843 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"swift-storage-0\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " pod="openstack/swift-storage-0" Nov 26 15:47:19 crc kubenswrapper[5010]: I1126 15:47:19.772029 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.397383 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" probeResult="failure" output=< Nov 26 15:47:20 crc kubenswrapper[5010]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 15:47:20 crc kubenswrapper[5010]: > Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.530916 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 26 15:47:20 crc kubenswrapper[5010]: W1126 15:47:20.533261 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1803fc99_2cc8_44e7_8ce5_eac5bc548f88.slice/crio-3d1170e746304bb58d50df547965f6cd5bb72f3a501665cfdb4dafcff6dcc456 WatchSource:0}: Error finding container 3d1170e746304bb58d50df547965f6cd5bb72f3a501665cfdb4dafcff6dcc456: Status 404 returned error can't find the container with id 3d1170e746304bb58d50df547965f6cd5bb72f3a501665cfdb4dafcff6dcc456 Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.607993 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-p4jwm"] Nov 26 15:47:20 crc kubenswrapper[5010]: E1126 15:47:20.608385 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d447aec-9b58-4184-8f6f-2b10d849d8c0" containerName="mariadb-account-create-update" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.608398 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d447aec-9b58-4184-8f6f-2b10d849d8c0" containerName="mariadb-account-create-update" Nov 26 15:47:20 crc kubenswrapper[5010]: E1126 15:47:20.608413 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a54e75fa-7b8b-4159-840f-983ec1a40e0d" containerName="mariadb-database-create" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.608420 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a54e75fa-7b8b-4159-840f-983ec1a40e0d" containerName="mariadb-database-create" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.608607 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9d447aec-9b58-4184-8f6f-2b10d849d8c0" containerName="mariadb-account-create-update" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.608637 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a54e75fa-7b8b-4159-840f-983ec1a40e0d" containerName="mariadb-database-create" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.609297 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.614153 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-q8pkh" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.614177 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.633458 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-p4jwm"] Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.674237 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"3d1170e746304bb58d50df547965f6cd5bb72f3a501665cfdb4dafcff6dcc456"} Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.735426 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-db-sync-config-data\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.735509 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-combined-ca-bundle\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.735530 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-config-data\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.735563 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zl9mt\" (UniqueName: \"kubernetes.io/projected/ea113023-3903-4ab3-b036-80328c6ba6ca-kube-api-access-zl9mt\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.837191 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-combined-ca-bundle\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.837470 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-config-data\") pod \"glance-db-sync-p4jwm\" (UID: 
\"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.837516 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zl9mt\" (UniqueName: \"kubernetes.io/projected/ea113023-3903-4ab3-b036-80328c6ba6ca-kube-api-access-zl9mt\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.837615 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-db-sync-config-data\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.843347 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-combined-ca-bundle\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.843918 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-db-sync-config-data\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.844383 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-config-data\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.856043 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zl9mt\" (UniqueName: \"kubernetes.io/projected/ea113023-3903-4ab3-b036-80328c6ba6ca-kube-api-access-zl9mt\") pod \"glance-db-sync-p4jwm\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:20 crc kubenswrapper[5010]: I1126 15:47:20.952450 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-p4jwm" Nov 26 15:47:21 crc kubenswrapper[5010]: I1126 15:47:21.466098 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-p4jwm"] Nov 26 15:47:21 crc kubenswrapper[5010]: I1126 15:47:21.683946 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-p4jwm" event={"ID":"ea113023-3903-4ab3-b036-80328c6ba6ca","Type":"ContainerStarted","Data":"b753596b7b5c9a3f6dce7633b35853bdb3d506105bc09550795f89ffe9cbef2e"} Nov 26 15:47:21 crc kubenswrapper[5010]: I1126 15:47:21.955097 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:47:23 crc kubenswrapper[5010]: I1126 15:47:23.701846 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"df9dfb68b38080d2f2517a40a46d8ae91eb3eca11c141ff220a21e22ce48690a"} Nov 26 15:47:23 crc kubenswrapper[5010]: I1126 15:47:23.702260 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"a92b03349dda704cc51977b5cdd2fcdd40871b506d74796925290a6da4ceb86e"} Nov 26 15:47:23 crc kubenswrapper[5010]: I1126 15:47:23.702272 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"55e059be841df7938e11264822fec73874738f64ef6b875efb95510e6965cf1b"} Nov 26 15:47:24 crc kubenswrapper[5010]: I1126 15:47:24.717264 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"2315d69e082e6c260094225fe89d5d8817821a2dcf66915354208ff345c9a274"} Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.395719 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" probeResult="failure" output=< Nov 26 15:47:25 crc kubenswrapper[5010]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 26 15:47:25 crc kubenswrapper[5010]: > Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.422484 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.436770 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.808014 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-nbrh7-config-7d7sl"] Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.810040 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.815698 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.820474 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-nbrh7-config-7d7sl"] Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.935542 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fn69v\" (UniqueName: \"kubernetes.io/projected/4429dea4-1692-4aac-8eeb-3c2e206de857-kube-api-access-fn69v\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.936019 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.936082 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run-ovn\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.936182 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-scripts\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.936293 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-additional-scripts\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:25 crc kubenswrapper[5010]: I1126 15:47:25.936580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-log-ovn\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.038415 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-log-ovn\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.038490 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run\") pod 
\"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.038517 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fn69v\" (UniqueName: \"kubernetes.io/projected/4429dea4-1692-4aac-8eeb-3c2e206de857-kube-api-access-fn69v\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.038550 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run-ovn\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.038589 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-scripts\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.038687 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-additional-scripts\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.039108 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run-ovn\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.039138 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.039215 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-log-ovn\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.039871 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-additional-scripts\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.041747 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-scripts\") pod 
\"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.061750 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fn69v\" (UniqueName: \"kubernetes.io/projected/4429dea4-1692-4aac-8eeb-3c2e206de857-kube-api-access-fn69v\") pod \"ovn-controller-nbrh7-config-7d7sl\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.126472 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:26 crc kubenswrapper[5010]: I1126 15:47:26.615763 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-nbrh7-config-7d7sl"] Nov 26 15:47:27 crc kubenswrapper[5010]: I1126 15:47:27.773895 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7-config-7d7sl" event={"ID":"4429dea4-1692-4aac-8eeb-3c2e206de857","Type":"ContainerDied","Data":"7919c80b2f17a78bdb933b9cc3b17600120fda6ef4e7cec81c2f04537e3abc35"} Nov 26 15:47:27 crc kubenswrapper[5010]: I1126 15:47:27.773891 5010 generic.go:334] "Generic (PLEG): container finished" podID="4429dea4-1692-4aac-8eeb-3c2e206de857" containerID="7919c80b2f17a78bdb933b9cc3b17600120fda6ef4e7cec81c2f04537e3abc35" exitCode=0 Nov 26 15:47:27 crc kubenswrapper[5010]: I1126 15:47:27.775624 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7-config-7d7sl" event={"ID":"4429dea4-1692-4aac-8eeb-3c2e206de857","Type":"ContainerStarted","Data":"08c0665d6f37021fb1232f97da5369b934cb0da8fb10479b26a57a3e4d34b970"} Nov 26 15:47:27 crc kubenswrapper[5010]: I1126 15:47:27.780559 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"6d794e589ecc207f0a022410f47d3aa359d8e1b3c5503eda2b2b369e69a171cf"} Nov 26 15:47:27 crc kubenswrapper[5010]: I1126 15:47:27.780620 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"18b9616512ee9afb2cfc002c2a3a4b7c6722774ff0238f548f51aad7f1e695a8"} Nov 26 15:47:28 crc kubenswrapper[5010]: I1126 15:47:28.794086 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"4b0402574e5cf70154b6681989bbdbd847b3e31c0811a89c6cfcc7aaf711a5f1"} Nov 26 15:47:30 crc kubenswrapper[5010]: I1126 15:47:30.401160 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-nbrh7" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.416556 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.779897 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-rnnz9"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.781074 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.792990 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-rnnz9"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.867900 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-bknm6"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.868828 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94dxd\" (UniqueName: \"kubernetes.io/projected/1fe9c714-9055-4b2a-b417-f24e02a47fac-kube-api-access-94dxd\") pod \"cinder-db-create-rnnz9\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.868892 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fe9c714-9055-4b2a-b417-f24e02a47fac-operator-scripts\") pod \"cinder-db-create-rnnz9\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.869131 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.887845 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-bknm6"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.916588 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-920e-account-create-update-zgc6z"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.917655 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.918081 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-920e-account-create-update-zgc6z"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.920300 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.973260 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-operator-scripts\") pod \"barbican-920e-account-create-update-zgc6z\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.973312 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxxnm\" (UniqueName: \"kubernetes.io/projected/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-kube-api-access-kxxnm\") pod \"barbican-920e-account-create-update-zgc6z\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.973361 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs7f4\" (UniqueName: \"kubernetes.io/projected/bbb49afd-179d-425f-aeb1-64a64c66fb98-kube-api-access-vs7f4\") pod \"barbican-db-create-bknm6\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.973427 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94dxd\" (UniqueName: \"kubernetes.io/projected/1fe9c714-9055-4b2a-b417-f24e02a47fac-kube-api-access-94dxd\") pod \"cinder-db-create-rnnz9\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.973473 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fe9c714-9055-4b2a-b417-f24e02a47fac-operator-scripts\") pod \"cinder-db-create-rnnz9\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.973507 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb49afd-179d-425f-aeb1-64a64c66fb98-operator-scripts\") pod \"barbican-db-create-bknm6\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.975686 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fe9c714-9055-4b2a-b417-f24e02a47fac-operator-scripts\") pod \"cinder-db-create-rnnz9\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.990372 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-3420-account-create-update-xs8x7"] Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.991818 5010 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:31 crc kubenswrapper[5010]: I1126 15:47:31.997262 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.002436 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3420-account-create-update-xs8x7"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.034601 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94dxd\" (UniqueName: \"kubernetes.io/projected/1fe9c714-9055-4b2a-b417-f24e02a47fac-kube-api-access-94dxd\") pod \"cinder-db-create-rnnz9\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.074325 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb49afd-179d-425f-aeb1-64a64c66fb98-operator-scripts\") pod \"barbican-db-create-bknm6\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.074386 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmfj9\" (UniqueName: \"kubernetes.io/projected/62328141-677e-41a9-84ae-413c9b3ce15a-kube-api-access-mmfj9\") pod \"cinder-3420-account-create-update-xs8x7\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.074415 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62328141-677e-41a9-84ae-413c9b3ce15a-operator-scripts\") pod \"cinder-3420-account-create-update-xs8x7\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.074447 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-operator-scripts\") pod \"barbican-920e-account-create-update-zgc6z\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.074475 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxxnm\" (UniqueName: \"kubernetes.io/projected/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-kube-api-access-kxxnm\") pod \"barbican-920e-account-create-update-zgc6z\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.074662 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs7f4\" (UniqueName: \"kubernetes.io/projected/bbb49afd-179d-425f-aeb1-64a64c66fb98-kube-api-access-vs7f4\") pod \"barbican-db-create-bknm6\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.075137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/bbb49afd-179d-425f-aeb1-64a64c66fb98-operator-scripts\") pod \"barbican-db-create-bknm6\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.075446 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-operator-scripts\") pod \"barbican-920e-account-create-update-zgc6z\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.102130 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxxnm\" (UniqueName: \"kubernetes.io/projected/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-kube-api-access-kxxnm\") pod \"barbican-920e-account-create-update-zgc6z\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.102954 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs7f4\" (UniqueName: \"kubernetes.io/projected/bbb49afd-179d-425f-aeb1-64a64c66fb98-kube-api-access-vs7f4\") pod \"barbican-db-create-bknm6\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.107076 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rnnz9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.176200 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmfj9\" (UniqueName: \"kubernetes.io/projected/62328141-677e-41a9-84ae-413c9b3ce15a-kube-api-access-mmfj9\") pod \"cinder-3420-account-create-update-xs8x7\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.176260 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62328141-677e-41a9-84ae-413c9b3ce15a-operator-scripts\") pod \"cinder-3420-account-create-update-xs8x7\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.177172 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62328141-677e-41a9-84ae-413c9b3ce15a-operator-scripts\") pod \"cinder-3420-account-create-update-xs8x7\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.177746 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-vmqg9"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.179138 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.181772 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.182075 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xzc9m" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.182284 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.182316 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.193551 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-vmqg9"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.205234 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bknm6" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.205234 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmfj9\" (UniqueName: \"kubernetes.io/projected/62328141-677e-41a9-84ae-413c9b3ce15a-kube-api-access-mmfj9\") pod \"cinder-3420-account-create-update-xs8x7\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.240045 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.253038 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-qgvnw"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.254048 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.269676 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qgvnw"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.278288 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-config-data\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.278346 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tj7nq\" (UniqueName: \"kubernetes.io/projected/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-kube-api-access-tj7nq\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.278390 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-combined-ca-bundle\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.278422 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpg8x\" (UniqueName: \"kubernetes.io/projected/4fa0a723-c228-4246-a4de-6718bd2be270-kube-api-access-wpg8x\") pod \"neutron-db-create-qgvnw\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.278448 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fa0a723-c228-4246-a4de-6718bd2be270-operator-scripts\") pod \"neutron-db-create-qgvnw\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.288817 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4616-account-create-update-tsdsw"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.290005 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.293365 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.308312 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4616-account-create-update-tsdsw"] Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.359465 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380304 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-config-data\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380396 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tj7nq\" (UniqueName: \"kubernetes.io/projected/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-kube-api-access-tj7nq\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380500 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-combined-ca-bundle\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380554 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpg8x\" (UniqueName: \"kubernetes.io/projected/4fa0a723-c228-4246-a4de-6718bd2be270-kube-api-access-wpg8x\") pod \"neutron-db-create-qgvnw\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380604 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fa0a723-c228-4246-a4de-6718bd2be270-operator-scripts\") pod \"neutron-db-create-qgvnw\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380793 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6ad10a8-9fed-45ae-830a-01f1b3147cae-operator-scripts\") pod \"neutron-4616-account-create-update-tsdsw\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.380845 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cg9t4\" (UniqueName: \"kubernetes.io/projected/e6ad10a8-9fed-45ae-830a-01f1b3147cae-kube-api-access-cg9t4\") pod \"neutron-4616-account-create-update-tsdsw\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.381577 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fa0a723-c228-4246-a4de-6718bd2be270-operator-scripts\") pod \"neutron-db-create-qgvnw\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.386280 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-config-data\") pod \"keystone-db-sync-vmqg9\" (UID: 
\"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.386840 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-combined-ca-bundle\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.397352 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpg8x\" (UniqueName: \"kubernetes.io/projected/4fa0a723-c228-4246-a4de-6718bd2be270-kube-api-access-wpg8x\") pod \"neutron-db-create-qgvnw\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.400264 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tj7nq\" (UniqueName: \"kubernetes.io/projected/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-kube-api-access-tj7nq\") pod \"keystone-db-sync-vmqg9\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.493044 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6ad10a8-9fed-45ae-830a-01f1b3147cae-operator-scripts\") pod \"neutron-4616-account-create-update-tsdsw\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.493136 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cg9t4\" (UniqueName: \"kubernetes.io/projected/e6ad10a8-9fed-45ae-830a-01f1b3147cae-kube-api-access-cg9t4\") pod \"neutron-4616-account-create-update-tsdsw\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.493980 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6ad10a8-9fed-45ae-830a-01f1b3147cae-operator-scripts\") pod \"neutron-4616-account-create-update-tsdsw\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.509574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cg9t4\" (UniqueName: \"kubernetes.io/projected/e6ad10a8-9fed-45ae-830a-01f1b3147cae-kube-api-access-cg9t4\") pod \"neutron-4616-account-create-update-tsdsw\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.555538 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.577622 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qgvnw" Nov 26 15:47:32 crc kubenswrapper[5010]: I1126 15:47:32.617218 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:47:33 crc kubenswrapper[5010]: E1126 15:47:33.359668 5010 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.154:49074->38.102.83.154:42721: write tcp 38.102.83.154:49074->38.102.83.154:42721: write: broken pipe Nov 26 15:47:38 crc kubenswrapper[5010]: I1126 15:47:38.886140 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927448 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run\") pod \"4429dea4-1692-4aac-8eeb-3c2e206de857\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927498 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fn69v\" (UniqueName: \"kubernetes.io/projected/4429dea4-1692-4aac-8eeb-3c2e206de857-kube-api-access-fn69v\") pod \"4429dea4-1692-4aac-8eeb-3c2e206de857\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927556 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run" (OuterVolumeSpecName: "var-run") pod "4429dea4-1692-4aac-8eeb-3c2e206de857" (UID: "4429dea4-1692-4aac-8eeb-3c2e206de857"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927585 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run-ovn\") pod \"4429dea4-1692-4aac-8eeb-3c2e206de857\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927622 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "4429dea4-1692-4aac-8eeb-3c2e206de857" (UID: "4429dea4-1692-4aac-8eeb-3c2e206de857"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-log-ovn\") pod \"4429dea4-1692-4aac-8eeb-3c2e206de857\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927896 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-additional-scripts\") pod \"4429dea4-1692-4aac-8eeb-3c2e206de857\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.927939 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-scripts\") pod \"4429dea4-1692-4aac-8eeb-3c2e206de857\" (UID: \"4429dea4-1692-4aac-8eeb-3c2e206de857\") " Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.928650 5010 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.928667 5010 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.931528 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-scripts" (OuterVolumeSpecName: "scripts") pod "4429dea4-1692-4aac-8eeb-3c2e206de857" (UID: "4429dea4-1692-4aac-8eeb-3c2e206de857"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.932161 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "4429dea4-1692-4aac-8eeb-3c2e206de857" (UID: "4429dea4-1692-4aac-8eeb-3c2e206de857"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.933877 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "4429dea4-1692-4aac-8eeb-3c2e206de857" (UID: "4429dea4-1692-4aac-8eeb-3c2e206de857"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.965437 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4429dea4-1692-4aac-8eeb-3c2e206de857-kube-api-access-fn69v" (OuterVolumeSpecName: "kube-api-access-fn69v") pod "4429dea4-1692-4aac-8eeb-3c2e206de857" (UID: "4429dea4-1692-4aac-8eeb-3c2e206de857"). InnerVolumeSpecName "kube-api-access-fn69v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.974481 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7-config-7d7sl" event={"ID":"4429dea4-1692-4aac-8eeb-3c2e206de857","Type":"ContainerDied","Data":"08c0665d6f37021fb1232f97da5369b934cb0da8fb10479b26a57a3e4d34b970"} Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.974525 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08c0665d6f37021fb1232f97da5369b934cb0da8fb10479b26a57a3e4d34b970" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:38.974646 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-7d7sl" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:39.030890 5010 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/4429dea4-1692-4aac-8eeb-3c2e206de857-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:39.030928 5010 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:39.030941 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4429dea4-1692-4aac-8eeb-3c2e206de857-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:39.030950 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fn69v\" (UniqueName: \"kubernetes.io/projected/4429dea4-1692-4aac-8eeb-3c2e206de857-kube-api-access-fn69v\") on node \"crc\" DevicePath \"\"" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:39.992016 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"e0678f8f20e1d205632c07cb24e8ce9e89576b47c8ef44f378b9a0a0dfb4ed62"} Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.045765 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-nbrh7-config-7d7sl"] Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.052821 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-nbrh7-config-7d7sl"] Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.144247 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-nbrh7-config-pst9f"] Nov 26 15:47:53 crc kubenswrapper[5010]: E1126 15:47:40.144648 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4429dea4-1692-4aac-8eeb-3c2e206de857" containerName="ovn-config" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.144662 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4429dea4-1692-4aac-8eeb-3c2e206de857" containerName="ovn-config" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.144903 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="4429dea4-1692-4aac-8eeb-3c2e206de857" containerName="ovn-config" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.145549 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.148913 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.151750 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-nbrh7-config-pst9f"] Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.252564 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run-ovn\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.252618 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-additional-scripts\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.252658 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-scripts\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.252719 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rz7m\" (UniqueName: \"kubernetes.io/projected/32564067-f76a-44ee-a8f9-cab59df2d49d-kube-api-access-8rz7m\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.252745 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.252777 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-log-ovn\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354371 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run-ovn\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354438 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: 
\"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-additional-scripts\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354493 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-scripts\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354560 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rz7m\" (UniqueName: \"kubernetes.io/projected/32564067-f76a-44ee-a8f9-cab59df2d49d-kube-api-access-8rz7m\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354673 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-log-ovn\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354933 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354945 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run-ovn\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.354934 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-log-ovn\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.356038 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-additional-scripts\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.359914 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-scripts\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.386743 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rz7m\" (UniqueName: \"kubernetes.io/projected/32564067-f76a-44ee-a8f9-cab59df2d49d-kube-api-access-8rz7m\") pod \"ovn-controller-nbrh7-config-pst9f\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:40.488619 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:41.423015 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:41.423336 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:41.911834 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4429dea4-1692-4aac-8eeb-3c2e206de857" path="/var/lib/kubelet/pods/4429dea4-1692-4aac-8eeb-3c2e206de857/volumes" Nov 26 15:47:53 crc kubenswrapper[5010]: E1126 15:47:45.966064 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29" Nov 26 15:47:53 crc kubenswrapper[5010]: E1126 15:47:45.967286 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-zl9mt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-p4jwm_openstack(ea113023-3903-4ab3-b036-80328c6ba6ca): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:47:53 crc kubenswrapper[5010]: E1126 15:47:45.968922 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-p4jwm" podUID="ea113023-3903-4ab3-b036-80328c6ba6ca" Nov 26 15:47:53 crc kubenswrapper[5010]: E1126 15:47:46.057920 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29\\\"\"" pod="openstack/glance-db-sync-p4jwm" podUID="ea113023-3903-4ab3-b036-80328c6ba6ca" Nov 26 15:47:53 crc kubenswrapper[5010]: I1126 15:47:53.922776 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-3420-account-create-update-xs8x7"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.047606 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-nbrh7-config-pst9f"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.059281 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-vmqg9"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.069158 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-bknm6"] Nov 26 15:47:54 crc kubenswrapper[5010]: 
W1126 15:47:54.077531 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbbb49afd_179d_425f_aeb1_64a64c66fb98.slice/crio-1f7d42e1f88ef9988b23071e2411ebdfe76ac2d69feaeae2b26a613b90d3bca4 WatchSource:0}: Error finding container 1f7d42e1f88ef9988b23071e2411ebdfe76ac2d69feaeae2b26a613b90d3bca4: Status 404 returned error can't find the container with id 1f7d42e1f88ef9988b23071e2411ebdfe76ac2d69feaeae2b26a613b90d3bca4 Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.080094 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-rnnz9"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.096526 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-qgvnw"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.105083 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-920e-account-create-update-zgc6z"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.122353 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4616-account-create-update-tsdsw"] Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.155685 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bknm6" event={"ID":"bbb49afd-179d-425f-aeb1-64a64c66fb98","Type":"ContainerStarted","Data":"1f7d42e1f88ef9988b23071e2411ebdfe76ac2d69feaeae2b26a613b90d3bca4"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.157254 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7-config-pst9f" event={"ID":"32564067-f76a-44ee-a8f9-cab59df2d49d","Type":"ContainerStarted","Data":"ba4c8c8f1d56b68aa745cf0c9a2857acf8e1d9f9c55f0a413971223c4c44ff5f"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.159299 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vmqg9" event={"ID":"5c7c983d-9ff5-40ac-a5a7-4945f350afb3","Type":"ContainerStarted","Data":"e7109a321963e7e180ad53ba30bd84581bd60d7fe27696fb6c610e39d20c2010"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.160403 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3420-account-create-update-xs8x7" event={"ID":"62328141-677e-41a9-84ae-413c9b3ce15a","Type":"ContainerStarted","Data":"b5747bf805f8b5dbfc177751c9dc6313d90e394bd2a04fda793ef4a88d84ea44"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.161231 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-920e-account-create-update-zgc6z" event={"ID":"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69","Type":"ContainerStarted","Data":"24e0caa84cbadd5561f3f96ea4b244bcc726511ff066395dc3b053c094e835ea"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.161968 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rnnz9" event={"ID":"1fe9c714-9055-4b2a-b417-f24e02a47fac","Type":"ContainerStarted","Data":"fe759cc6c3e353e4d4cf2ceb05a75319dedf6d1b91a52909a242937f6debdd7e"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.162884 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qgvnw" event={"ID":"4fa0a723-c228-4246-a4de-6718bd2be270","Type":"ContainerStarted","Data":"ae5f632534af146eba343f1b751a98d9c1f17e17bfff1f52a6cca67853827bcb"} Nov 26 15:47:54 crc kubenswrapper[5010]: I1126 15:47:54.164379 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-4616-account-create-update-tsdsw" event={"ID":"e6ad10a8-9fed-45ae-830a-01f1b3147cae","Type":"ContainerStarted","Data":"49cc32937203c04fef081a23f76d55fbe3f06d2637913b14f585967ea1571298"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.196545 5010 generic.go:334] "Generic (PLEG): container finished" podID="e6ad10a8-9fed-45ae-830a-01f1b3147cae" containerID="607a96fde7603b28f08e865d2b7295908a4e57642edd8a9f9b3bbac6fb68fb8e" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.196670 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4616-account-create-update-tsdsw" event={"ID":"e6ad10a8-9fed-45ae-830a-01f1b3147cae","Type":"ContainerDied","Data":"607a96fde7603b28f08e865d2b7295908a4e57642edd8a9f9b3bbac6fb68fb8e"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.211172 5010 generic.go:334] "Generic (PLEG): container finished" podID="bbb49afd-179d-425f-aeb1-64a64c66fb98" containerID="cc1a141c6ceffeefcff4849801770b463e32e9bcbe7a5f8c4d3291a2d50429bb" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.211408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bknm6" event={"ID":"bbb49afd-179d-425f-aeb1-64a64c66fb98","Type":"ContainerDied","Data":"cc1a141c6ceffeefcff4849801770b463e32e9bcbe7a5f8c4d3291a2d50429bb"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.213351 5010 generic.go:334] "Generic (PLEG): container finished" podID="32564067-f76a-44ee-a8f9-cab59df2d49d" containerID="82c66190a4a384271219bce14f7eecdaa1199745d98b7a809afea4bb7e1faf9b" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.213409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7-config-pst9f" event={"ID":"32564067-f76a-44ee-a8f9-cab59df2d49d","Type":"ContainerDied","Data":"82c66190a4a384271219bce14f7eecdaa1199745d98b7a809afea4bb7e1faf9b"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.221730 5010 generic.go:334] "Generic (PLEG): container finished" podID="62328141-677e-41a9-84ae-413c9b3ce15a" containerID="8c6358d9a35379339618996fe36ec60f4eec6869c7729dabfb9f4d68803d62e7" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.222020 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3420-account-create-update-xs8x7" event={"ID":"62328141-677e-41a9-84ae-413c9b3ce15a","Type":"ContainerDied","Data":"8c6358d9a35379339618996fe36ec60f4eec6869c7729dabfb9f4d68803d62e7"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.227839 5010 generic.go:334] "Generic (PLEG): container finished" podID="a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" containerID="5adeb336a30ef297498044077e878f62a25ad2a35cf050ca984209cff33f4a8f" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.227949 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-920e-account-create-update-zgc6z" event={"ID":"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69","Type":"ContainerDied","Data":"5adeb336a30ef297498044077e878f62a25ad2a35cf050ca984209cff33f4a8f"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.229261 5010 generic.go:334] "Generic (PLEG): container finished" podID="1fe9c714-9055-4b2a-b417-f24e02a47fac" containerID="d4472dd857fa51dd467e2749e5440204642d3267b40603be780d958033a111c7" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.229307 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rnnz9" 
event={"ID":"1fe9c714-9055-4b2a-b417-f24e02a47fac","Type":"ContainerDied","Data":"d4472dd857fa51dd467e2749e5440204642d3267b40603be780d958033a111c7"} Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.230538 5010 generic.go:334] "Generic (PLEG): container finished" podID="4fa0a723-c228-4246-a4de-6718bd2be270" containerID="167726dce406cc31ff231ebe5b4368ed3c91a0e2e0f481ef182f176efe9ce00d" exitCode=0 Nov 26 15:47:55 crc kubenswrapper[5010]: I1126 15:47:55.230559 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qgvnw" event={"ID":"4fa0a723-c228-4246-a4de-6718bd2be270","Type":"ContainerDied","Data":"167726dce406cc31ff231ebe5b4368ed3c91a0e2e0f481ef182f176efe9ce00d"} Nov 26 15:47:56 crc kubenswrapper[5010]: I1126 15:47:56.245750 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"75089565aaa9cf8b99c1bbb2c38ff4c538bc9761ad1f7d65a1db0333de3c360e"} Nov 26 15:47:56 crc kubenswrapper[5010]: I1126 15:47:56.246229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"1227084a08d26738373e26d1eaa54ec1c0e0c92d3d3601f6a05af2770c69551e"} Nov 26 15:47:56 crc kubenswrapper[5010]: I1126 15:47:56.246254 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"ec35df7082d1bd361a74495ef68869fe5465b44b7de7cab15bbe9c7d46d0924f"} Nov 26 15:47:57 crc kubenswrapper[5010]: I1126 15:47:57.259918 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"b290a5f7ec51985b250b6f158fb41d40ac9ddeab529cc0032fbce6f190f4fde3"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.129901 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.136750 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rnnz9" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.144651 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.174427 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.185681 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-qgvnw" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.203266 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bknm6" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.210646 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273154 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fe9c714-9055-4b2a-b417-f24e02a47fac-operator-scripts\") pod \"1fe9c714-9055-4b2a-b417-f24e02a47fac\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273246 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxxnm\" (UniqueName: \"kubernetes.io/projected/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-kube-api-access-kxxnm\") pod \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273286 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run\") pod \"32564067-f76a-44ee-a8f9-cab59df2d49d\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273336 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8rz7m\" (UniqueName: \"kubernetes.io/projected/32564067-f76a-44ee-a8f9-cab59df2d49d-kube-api-access-8rz7m\") pod \"32564067-f76a-44ee-a8f9-cab59df2d49d\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273515 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94dxd\" (UniqueName: \"kubernetes.io/projected/1fe9c714-9055-4b2a-b417-f24e02a47fac-kube-api-access-94dxd\") pod \"1fe9c714-9055-4b2a-b417-f24e02a47fac\" (UID: \"1fe9c714-9055-4b2a-b417-f24e02a47fac\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273600 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run-ovn\") pod \"32564067-f76a-44ee-a8f9-cab59df2d49d\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273631 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-log-ovn\") pod \"32564067-f76a-44ee-a8f9-cab59df2d49d\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273660 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62328141-677e-41a9-84ae-413c9b3ce15a-operator-scripts\") pod \"62328141-677e-41a9-84ae-413c9b3ce15a\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273720 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-scripts\") pod \"32564067-f76a-44ee-a8f9-cab59df2d49d\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273747 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-additional-scripts\") pod 
\"32564067-f76a-44ee-a8f9-cab59df2d49d\" (UID: \"32564067-f76a-44ee-a8f9-cab59df2d49d\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273765 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-operator-scripts\") pod \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\" (UID: \"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273819 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmfj9\" (UniqueName: \"kubernetes.io/projected/62328141-677e-41a9-84ae-413c9b3ce15a-kube-api-access-mmfj9\") pod \"62328141-677e-41a9-84ae-413c9b3ce15a\" (UID: \"62328141-677e-41a9-84ae-413c9b3ce15a\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.273948 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "32564067-f76a-44ee-a8f9-cab59df2d49d" (UID: "32564067-f76a-44ee-a8f9-cab59df2d49d"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.274695 5010 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.275237 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "32564067-f76a-44ee-a8f9-cab59df2d49d" (UID: "32564067-f76a-44ee-a8f9-cab59df2d49d"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.275469 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "32564067-f76a-44ee-a8f9-cab59df2d49d" (UID: "32564067-f76a-44ee-a8f9-cab59df2d49d"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.275826 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-scripts" (OuterVolumeSpecName: "scripts") pod "32564067-f76a-44ee-a8f9-cab59df2d49d" (UID: "32564067-f76a-44ee-a8f9-cab59df2d49d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.275842 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62328141-677e-41a9-84ae-413c9b3ce15a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "62328141-677e-41a9-84ae-413c9b3ce15a" (UID: "62328141-677e-41a9-84ae-413c9b3ce15a"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.275913 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run" (OuterVolumeSpecName: "var-run") pod "32564067-f76a-44ee-a8f9-cab59df2d49d" (UID: "32564067-f76a-44ee-a8f9-cab59df2d49d"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.276242 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" (UID: "a36bfb95-ac5b-44ff-8b33-5f2e10ebea69"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.277473 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1fe9c714-9055-4b2a-b417-f24e02a47fac-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1fe9c714-9055-4b2a-b417-f24e02a47fac" (UID: "1fe9c714-9055-4b2a-b417-f24e02a47fac"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.283106 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62328141-677e-41a9-84ae-413c9b3ce15a-kube-api-access-mmfj9" (OuterVolumeSpecName: "kube-api-access-mmfj9") pod "62328141-677e-41a9-84ae-413c9b3ce15a" (UID: "62328141-677e-41a9-84ae-413c9b3ce15a"). InnerVolumeSpecName "kube-api-access-mmfj9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.285653 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-kube-api-access-kxxnm" (OuterVolumeSpecName: "kube-api-access-kxxnm") pod "a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" (UID: "a36bfb95-ac5b-44ff-8b33-5f2e10ebea69"). InnerVolumeSpecName "kube-api-access-kxxnm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.286171 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32564067-f76a-44ee-a8f9-cab59df2d49d-kube-api-access-8rz7m" (OuterVolumeSpecName: "kube-api-access-8rz7m") pod "32564067-f76a-44ee-a8f9-cab59df2d49d" (UID: "32564067-f76a-44ee-a8f9-cab59df2d49d"). InnerVolumeSpecName "kube-api-access-8rz7m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.289066 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1fe9c714-9055-4b2a-b417-f24e02a47fac-kube-api-access-94dxd" (OuterVolumeSpecName: "kube-api-access-94dxd") pod "1fe9c714-9055-4b2a-b417-f24e02a47fac" (UID: "1fe9c714-9055-4b2a-b417-f24e02a47fac"). InnerVolumeSpecName "kube-api-access-94dxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.345242 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4616-account-create-update-tsdsw" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.345247 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4616-account-create-update-tsdsw" event={"ID":"e6ad10a8-9fed-45ae-830a-01f1b3147cae","Type":"ContainerDied","Data":"49cc32937203c04fef081a23f76d55fbe3f06d2637913b14f585967ea1571298"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.346222 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49cc32937203c04fef081a23f76d55fbe3f06d2637913b14f585967ea1571298" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.353809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"2aac1aac86049fceb0d32a0aa7530aacebb03989a907a006110f6386991013b9"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.355635 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-bknm6" event={"ID":"bbb49afd-179d-425f-aeb1-64a64c66fb98","Type":"ContainerDied","Data":"1f7d42e1f88ef9988b23071e2411ebdfe76ac2d69feaeae2b26a613b90d3bca4"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.355660 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-bknm6" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.355679 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f7d42e1f88ef9988b23071e2411ebdfe76ac2d69feaeae2b26a613b90d3bca4" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.357952 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7-config-pst9f" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.358726 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7-config-pst9f" event={"ID":"32564067-f76a-44ee-a8f9-cab59df2d49d","Type":"ContainerDied","Data":"ba4c8c8f1d56b68aa745cf0c9a2857acf8e1d9f9c55f0a413971223c4c44ff5f"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.358764 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba4c8c8f1d56b68aa745cf0c9a2857acf8e1d9f9c55f0a413971223c4c44ff5f" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.359808 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-3420-account-create-update-xs8x7" event={"ID":"62328141-677e-41a9-84ae-413c9b3ce15a","Type":"ContainerDied","Data":"b5747bf805f8b5dbfc177751c9dc6313d90e394bd2a04fda793ef4a88d84ea44"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.359833 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5747bf805f8b5dbfc177751c9dc6313d90e394bd2a04fda793ef4a88d84ea44" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.359885 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-3420-account-create-update-xs8x7" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.365019 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-920e-account-create-update-zgc6z" event={"ID":"a36bfb95-ac5b-44ff-8b33-5f2e10ebea69","Type":"ContainerDied","Data":"24e0caa84cbadd5561f3f96ea4b244bcc726511ff066395dc3b053c094e835ea"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.365074 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="24e0caa84cbadd5561f3f96ea4b244bcc726511ff066395dc3b053c094e835ea" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.365148 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-920e-account-create-update-zgc6z" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.368004 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-rnnz9" event={"ID":"1fe9c714-9055-4b2a-b417-f24e02a47fac","Type":"ContainerDied","Data":"fe759cc6c3e353e4d4cf2ceb05a75319dedf6d1b91a52909a242937f6debdd7e"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.368150 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe759cc6c3e353e4d4cf2ceb05a75319dedf6d1b91a52909a242937f6debdd7e" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.368047 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-rnnz9" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.372854 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-qgvnw" event={"ID":"4fa0a723-c228-4246-a4de-6718bd2be270","Type":"ContainerDied","Data":"ae5f632534af146eba343f1b751a98d9c1f17e17bfff1f52a6cca67853827bcb"} Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.372882 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ae5f632534af146eba343f1b751a98d9c1f17e17bfff1f52a6cca67853827bcb" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.372929 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-qgvnw" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.375629 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs7f4\" (UniqueName: \"kubernetes.io/projected/bbb49afd-179d-425f-aeb1-64a64c66fb98-kube-api-access-vs7f4\") pod \"bbb49afd-179d-425f-aeb1-64a64c66fb98\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.375700 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fa0a723-c228-4246-a4de-6718bd2be270-operator-scripts\") pod \"4fa0a723-c228-4246-a4de-6718bd2be270\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.375748 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb49afd-179d-425f-aeb1-64a64c66fb98-operator-scripts\") pod \"bbb49afd-179d-425f-aeb1-64a64c66fb98\" (UID: \"bbb49afd-179d-425f-aeb1-64a64c66fb98\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.375839 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6ad10a8-9fed-45ae-830a-01f1b3147cae-operator-scripts\") pod \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.375897 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpg8x\" (UniqueName: \"kubernetes.io/projected/4fa0a723-c228-4246-a4de-6718bd2be270-kube-api-access-wpg8x\") pod \"4fa0a723-c228-4246-a4de-6718bd2be270\" (UID: \"4fa0a723-c228-4246-a4de-6718bd2be270\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.375934 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cg9t4\" (UniqueName: \"kubernetes.io/projected/e6ad10a8-9fed-45ae-830a-01f1b3147cae-kube-api-access-cg9t4\") pod \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\" (UID: \"e6ad10a8-9fed-45ae-830a-01f1b3147cae\") " Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376283 5010 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376298 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376307 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmfj9\" (UniqueName: \"kubernetes.io/projected/62328141-677e-41a9-84ae-413c9b3ce15a-kube-api-access-mmfj9\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376318 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1fe9c714-9055-4b2a-b417-f24e02a47fac-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376327 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxxnm\" (UniqueName: 
\"kubernetes.io/projected/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69-kube-api-access-kxxnm\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376349 5010 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376360 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8rz7m\" (UniqueName: \"kubernetes.io/projected/32564067-f76a-44ee-a8f9-cab59df2d49d-kube-api-access-8rz7m\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376373 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94dxd\" (UniqueName: \"kubernetes.io/projected/1fe9c714-9055-4b2a-b417-f24e02a47fac-kube-api-access-94dxd\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376382 5010 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/32564067-f76a-44ee-a8f9-cab59df2d49d-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376391 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62328141-677e-41a9-84ae-413c9b3ce15a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.376400 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/32564067-f76a-44ee-a8f9-cab59df2d49d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.377483 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6ad10a8-9fed-45ae-830a-01f1b3147cae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e6ad10a8-9fed-45ae-830a-01f1b3147cae" (UID: "e6ad10a8-9fed-45ae-830a-01f1b3147cae"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.378078 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbb49afd-179d-425f-aeb1-64a64c66fb98-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bbb49afd-179d-425f-aeb1-64a64c66fb98" (UID: "bbb49afd-179d-425f-aeb1-64a64c66fb98"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.378535 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fa0a723-c228-4246-a4de-6718bd2be270-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4fa0a723-c228-4246-a4de-6718bd2be270" (UID: "4fa0a723-c228-4246-a4de-6718bd2be270"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.380894 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6ad10a8-9fed-45ae-830a-01f1b3147cae-kube-api-access-cg9t4" (OuterVolumeSpecName: "kube-api-access-cg9t4") pod "e6ad10a8-9fed-45ae-830a-01f1b3147cae" (UID: "e6ad10a8-9fed-45ae-830a-01f1b3147cae"). InnerVolumeSpecName "kube-api-access-cg9t4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.382220 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fa0a723-c228-4246-a4de-6718bd2be270-kube-api-access-wpg8x" (OuterVolumeSpecName: "kube-api-access-wpg8x") pod "4fa0a723-c228-4246-a4de-6718bd2be270" (UID: "4fa0a723-c228-4246-a4de-6718bd2be270"). InnerVolumeSpecName "kube-api-access-wpg8x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.389913 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbb49afd-179d-425f-aeb1-64a64c66fb98-kube-api-access-vs7f4" (OuterVolumeSpecName: "kube-api-access-vs7f4") pod "bbb49afd-179d-425f-aeb1-64a64c66fb98" (UID: "bbb49afd-179d-425f-aeb1-64a64c66fb98"). InnerVolumeSpecName "kube-api-access-vs7f4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.481355 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpg8x\" (UniqueName: \"kubernetes.io/projected/4fa0a723-c228-4246-a4de-6718bd2be270-kube-api-access-wpg8x\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.481398 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cg9t4\" (UniqueName: \"kubernetes.io/projected/e6ad10a8-9fed-45ae-830a-01f1b3147cae-kube-api-access-cg9t4\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.481413 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs7f4\" (UniqueName: \"kubernetes.io/projected/bbb49afd-179d-425f-aeb1-64a64c66fb98-kube-api-access-vs7f4\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.481430 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4fa0a723-c228-4246-a4de-6718bd2be270-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.481445 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bbb49afd-179d-425f-aeb1-64a64c66fb98-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:05 crc kubenswrapper[5010]: I1126 15:48:05.481460 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e6ad10a8-9fed-45ae-830a-01f1b3147cae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.227813 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-nbrh7-config-pst9f"] Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.237671 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-nbrh7-config-pst9f"] Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.383670 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-p4jwm" event={"ID":"ea113023-3903-4ab3-b036-80328c6ba6ca","Type":"ContainerStarted","Data":"eef72c8ede1b706c5fd5317ea82d316b542309243d7659a4a8f9d540f0aeff1c"} Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.391537 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"e08bba5d0a854ba8aa4fb7af34e20011b90f803f2ae0c820fde74890a8ed506d"} Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.391574 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerStarted","Data":"956193edff3817c0a6aaac66e75e2a2cbc0c70d7f96f5cf29968a35548725373"} Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.394015 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vmqg9" event={"ID":"5c7c983d-9ff5-40ac-a5a7-4945f350afb3","Type":"ContainerStarted","Data":"6bc9f8469188874f795d80b8019d6100b4a9920b1743b2e5c4b536a83dc31f8e"} Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.411573 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-p4jwm" podStartSLOduration=2.400181042 podStartE2EDuration="46.411555972s" podCreationTimestamp="2025-11-26 15:47:20 +0000 UTC" firstStartedPulling="2025-11-26 15:47:21.47719084 +0000 UTC m=+1262.267907988" lastFinishedPulling="2025-11-26 15:48:05.48856575 +0000 UTC m=+1306.279282918" observedRunningTime="2025-11-26 15:48:06.405696986 +0000 UTC m=+1307.196414134" watchObservedRunningTime="2025-11-26 15:48:06.411555972 +0000 UTC m=+1307.202273120" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.421912 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-vmqg9" podStartSLOduration=22.989971035 podStartE2EDuration="34.421897101s" podCreationTimestamp="2025-11-26 15:47:32 +0000 UTC" firstStartedPulling="2025-11-26 15:47:54.057772742 +0000 UTC m=+1294.848489890" lastFinishedPulling="2025-11-26 15:48:05.489698798 +0000 UTC m=+1306.280415956" observedRunningTime="2025-11-26 15:48:06.418219159 +0000 UTC m=+1307.208936307" watchObservedRunningTime="2025-11-26 15:48:06.421897101 +0000 UTC m=+1307.212614249" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.465959 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=46.10263771 podStartE2EDuration="1m20.465934083s" podCreationTimestamp="2025-11-26 15:46:46 +0000 UTC" firstStartedPulling="2025-11-26 15:47:20.535435138 +0000 UTC m=+1261.326152296" lastFinishedPulling="2025-11-26 15:47:54.898731511 +0000 UTC m=+1295.689448669" observedRunningTime="2025-11-26 15:48:06.45860787 +0000 UTC m=+1307.249325028" watchObservedRunningTime="2025-11-26 15:48:06.465934083 +0000 UTC m=+1307.256651231" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.780379 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-684f7c765c-ssxjs"] Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781218 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6ad10a8-9fed-45ae-830a-01f1b3147cae" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781242 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6ad10a8-9fed-45ae-830a-01f1b3147cae" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781262 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781271 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781282 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62328141-677e-41a9-84ae-413c9b3ce15a" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781290 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="62328141-677e-41a9-84ae-413c9b3ce15a" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781304 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32564067-f76a-44ee-a8f9-cab59df2d49d" containerName="ovn-config" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781312 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="32564067-f76a-44ee-a8f9-cab59df2d49d" containerName="ovn-config" Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781332 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbb49afd-179d-425f-aeb1-64a64c66fb98" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781339 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbb49afd-179d-425f-aeb1-64a64c66fb98" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781363 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1fe9c714-9055-4b2a-b417-f24e02a47fac" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781376 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1fe9c714-9055-4b2a-b417-f24e02a47fac" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: E1126 15:48:06.781393 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fa0a723-c228-4246-a4de-6718bd2be270" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781401 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fa0a723-c228-4246-a4de-6718bd2be270" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781628 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6ad10a8-9fed-45ae-830a-01f1b3147cae" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781660 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781675 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1fe9c714-9055-4b2a-b417-f24e02a47fac" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781691 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="62328141-677e-41a9-84ae-413c9b3ce15a" containerName="mariadb-account-create-update" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781725 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="32564067-f76a-44ee-a8f9-cab59df2d49d" containerName="ovn-config" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781737 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fa0a723-c228-4246-a4de-6718bd2be270" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.781751 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="bbb49afd-179d-425f-aeb1-64a64c66fb98" containerName="mariadb-database-create" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.783057 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.787568 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.804361 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-684f7c765c-ssxjs"] Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.915822 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-sb\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.915907 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-config\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.915982 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-swift-storage-0\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.916014 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-svc\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.916046 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-nb\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:06 crc kubenswrapper[5010]: I1126 15:48:06.916074 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slcwj\" (UniqueName: \"kubernetes.io/projected/673c6759-4cfb-4eb1-9e13-81b318c258ff-kube-api-access-slcwj\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.017988 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-sb\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.018100 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-config\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.018236 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-swift-storage-0\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.018282 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-svc\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.018326 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-nb\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.018374 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slcwj\" (UniqueName: \"kubernetes.io/projected/673c6759-4cfb-4eb1-9e13-81b318c258ff-kube-api-access-slcwj\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.020248 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-sb\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.021169 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-config\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.023430 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-nb\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.024154 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-swift-storage-0\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.024586 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-svc\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.050262 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slcwj\" (UniqueName: \"kubernetes.io/projected/673c6759-4cfb-4eb1-9e13-81b318c258ff-kube-api-access-slcwj\") pod \"dnsmasq-dns-684f7c765c-ssxjs\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.120024 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.433594 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-684f7c765c-ssxjs"] Nov 26 15:48:07 crc kubenswrapper[5010]: W1126 15:48:07.449167 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod673c6759_4cfb_4eb1_9e13_81b318c258ff.slice/crio-80e4a27749d3d0cb4a65270b07cef11790f5563e16b5ed4cbacde52328053fc6 WatchSource:0}: Error finding container 80e4a27749d3d0cb4a65270b07cef11790f5563e16b5ed4cbacde52328053fc6: Status 404 returned error can't find the container with id 80e4a27749d3d0cb4a65270b07cef11790f5563e16b5ed4cbacde52328053fc6 Nov 26 15:48:07 crc kubenswrapper[5010]: I1126 15:48:07.903342 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32564067-f76a-44ee-a8f9-cab59df2d49d" path="/var/lib/kubelet/pods/32564067-f76a-44ee-a8f9-cab59df2d49d/volumes" Nov 26 15:48:08 crc kubenswrapper[5010]: I1126 15:48:08.438762 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" event={"ID":"673c6759-4cfb-4eb1-9e13-81b318c258ff","Type":"ContainerStarted","Data":"80e4a27749d3d0cb4a65270b07cef11790f5563e16b5ed4cbacde52328053fc6"} Nov 26 15:48:10 crc kubenswrapper[5010]: I1126 15:48:10.461298 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" event={"ID":"673c6759-4cfb-4eb1-9e13-81b318c258ff","Type":"ContainerStarted","Data":"79f30bb2ec9f2ad28f78ee8fcc8869a8c2530704a78f3d15b7d8d177801d0d7d"} Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.422957 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.423022 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.423072 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.423865 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"74af0b7ad1bdddc342c1daa4543b045a23faf8e3bd5f2a3ae5f6ba14cafd4e61"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.423936 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://74af0b7ad1bdddc342c1daa4543b045a23faf8e3bd5f2a3ae5f6ba14cafd4e61" gracePeriod=600 Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.474233 5010 generic.go:334] "Generic (PLEG): container finished" podID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerID="79f30bb2ec9f2ad28f78ee8fcc8869a8c2530704a78f3d15b7d8d177801d0d7d" exitCode=0 Nov 26 15:48:11 crc kubenswrapper[5010]: I1126 15:48:11.474479 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" event={"ID":"673c6759-4cfb-4eb1-9e13-81b318c258ff","Type":"ContainerDied","Data":"79f30bb2ec9f2ad28f78ee8fcc8869a8c2530704a78f3d15b7d8d177801d0d7d"} Nov 26 15:48:12 crc kubenswrapper[5010]: I1126 15:48:12.484964 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="74af0b7ad1bdddc342c1daa4543b045a23faf8e3bd5f2a3ae5f6ba14cafd4e61" exitCode=0 Nov 26 15:48:12 crc kubenswrapper[5010]: I1126 15:48:12.485051 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"74af0b7ad1bdddc342c1daa4543b045a23faf8e3bd5f2a3ae5f6ba14cafd4e61"} Nov 26 15:48:12 crc kubenswrapper[5010]: I1126 15:48:12.486252 5010 scope.go:117] "RemoveContainer" containerID="59f84423fa85afba142264d8718184fcb64f0d905168b9c5b86ca7f3cd897062" Nov 26 15:48:12 crc kubenswrapper[5010]: I1126 15:48:12.488038 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" event={"ID":"673c6759-4cfb-4eb1-9e13-81b318c258ff","Type":"ContainerStarted","Data":"d07adbf8132600c4f8b5f7ca9c691c8503c55f456d7270b7060a9c6d3dfafa76"} Nov 26 15:48:13 crc kubenswrapper[5010]: I1126 15:48:13.500431 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:13 crc kubenswrapper[5010]: I1126 15:48:13.521643 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" podStartSLOduration=7.5216200010000005 podStartE2EDuration="7.521620001s" podCreationTimestamp="2025-11-26 15:48:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:48:13.515363754 +0000 UTC m=+1314.306080902" watchObservedRunningTime="2025-11-26 15:48:13.521620001 +0000 UTC m=+1314.312337149" Nov 26 15:48:14 crc kubenswrapper[5010]: I1126 15:48:14.513642 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5"} Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.120911 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.185051 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2mg29"] Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.185377 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerName="dnsmasq-dns" containerID="cri-o://a60cce7f5232cf4a19c7a095215a9f4426de0e9578f05c344c30cee1c1b6ea05" gracePeriod=10 Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.540785 5010 generic.go:334] "Generic (PLEG): container finished" podID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerID="a60cce7f5232cf4a19c7a095215a9f4426de0e9578f05c344c30cee1c1b6ea05" exitCode=0 Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.540873 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" event={"ID":"29b615e2-07e5-4456-93e6-1e2a2c5c8a38","Type":"ContainerDied","Data":"a60cce7f5232cf4a19c7a095215a9f4426de0e9578f05c344c30cee1c1b6ea05"} Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.645442 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.738555 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-dns-svc\") pod \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.738785 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-config\") pod \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.738837 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-sb\") pod \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.738881 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-nb\") pod \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.738930 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46tlv\" (UniqueName: \"kubernetes.io/projected/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-kube-api-access-46tlv\") pod \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\" (UID: \"29b615e2-07e5-4456-93e6-1e2a2c5c8a38\") " Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.745921 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-kube-api-access-46tlv" (OuterVolumeSpecName: "kube-api-access-46tlv") pod "29b615e2-07e5-4456-93e6-1e2a2c5c8a38" (UID: "29b615e2-07e5-4456-93e6-1e2a2c5c8a38"). InnerVolumeSpecName "kube-api-access-46tlv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.790345 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-config" (OuterVolumeSpecName: "config") pod "29b615e2-07e5-4456-93e6-1e2a2c5c8a38" (UID: "29b615e2-07e5-4456-93e6-1e2a2c5c8a38"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.793686 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "29b615e2-07e5-4456-93e6-1e2a2c5c8a38" (UID: "29b615e2-07e5-4456-93e6-1e2a2c5c8a38"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.794494 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "29b615e2-07e5-4456-93e6-1e2a2c5c8a38" (UID: "29b615e2-07e5-4456-93e6-1e2a2c5c8a38"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.813537 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "29b615e2-07e5-4456-93e6-1e2a2c5c8a38" (UID: "29b615e2-07e5-4456-93e6-1e2a2c5c8a38"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.842922 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46tlv\" (UniqueName: \"kubernetes.io/projected/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-kube-api-access-46tlv\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.842979 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.842993 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.843007 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:17 crc kubenswrapper[5010]: I1126 15:48:17.843020 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/29b615e2-07e5-4456-93e6-1e2a2c5c8a38-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:18 crc kubenswrapper[5010]: I1126 15:48:18.571029 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" event={"ID":"29b615e2-07e5-4456-93e6-1e2a2c5c8a38","Type":"ContainerDied","Data":"a9377803eac38c9d9037b5b0a8577d9e9fed76a0f50516eb7562147a5504e9f9"} Nov 26 15:48:18 crc kubenswrapper[5010]: I1126 15:48:18.571621 5010 scope.go:117] "RemoveContainer" 
containerID="a60cce7f5232cf4a19c7a095215a9f4426de0e9578f05c344c30cee1c1b6ea05" Nov 26 15:48:18 crc kubenswrapper[5010]: I1126 15:48:18.571947 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-2mg29" Nov 26 15:48:18 crc kubenswrapper[5010]: I1126 15:48:18.600785 5010 scope.go:117] "RemoveContainer" containerID="d999b41463033977ca3cd35b81d04bae4e6f76bc7a11b714ab21ed87a6d0c4ff" Nov 26 15:48:18 crc kubenswrapper[5010]: I1126 15:48:18.600928 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2mg29"] Nov 26 15:48:18 crc kubenswrapper[5010]: I1126 15:48:18.611386 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-2mg29"] Nov 26 15:48:19 crc kubenswrapper[5010]: I1126 15:48:19.902676 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" path="/var/lib/kubelet/pods/29b615e2-07e5-4456-93e6-1e2a2c5c8a38/volumes" Nov 26 15:48:20 crc kubenswrapper[5010]: I1126 15:48:20.589601 5010 generic.go:334] "Generic (PLEG): container finished" podID="5c7c983d-9ff5-40ac-a5a7-4945f350afb3" containerID="6bc9f8469188874f795d80b8019d6100b4a9920b1743b2e5c4b536a83dc31f8e" exitCode=0 Nov 26 15:48:20 crc kubenswrapper[5010]: I1126 15:48:20.589689 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vmqg9" event={"ID":"5c7c983d-9ff5-40ac-a5a7-4945f350afb3","Type":"ContainerDied","Data":"6bc9f8469188874f795d80b8019d6100b4a9920b1743b2e5c4b536a83dc31f8e"} Nov 26 15:48:21 crc kubenswrapper[5010]: I1126 15:48:21.972414 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.124771 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-combined-ca-bundle\") pod \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.125317 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-config-data\") pod \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.125460 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tj7nq\" (UniqueName: \"kubernetes.io/projected/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-kube-api-access-tj7nq\") pod \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\" (UID: \"5c7c983d-9ff5-40ac-a5a7-4945f350afb3\") " Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.131393 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-kube-api-access-tj7nq" (OuterVolumeSpecName: "kube-api-access-tj7nq") pod "5c7c983d-9ff5-40ac-a5a7-4945f350afb3" (UID: "5c7c983d-9ff5-40ac-a5a7-4945f350afb3"). InnerVolumeSpecName "kube-api-access-tj7nq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.160883 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c7c983d-9ff5-40ac-a5a7-4945f350afb3" (UID: "5c7c983d-9ff5-40ac-a5a7-4945f350afb3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.202827 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-config-data" (OuterVolumeSpecName: "config-data") pod "5c7c983d-9ff5-40ac-a5a7-4945f350afb3" (UID: "5c7c983d-9ff5-40ac-a5a7-4945f350afb3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.243995 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tj7nq\" (UniqueName: \"kubernetes.io/projected/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-kube-api-access-tj7nq\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.244078 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.244119 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7c983d-9ff5-40ac-a5a7-4945f350afb3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.612582 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-vmqg9" event={"ID":"5c7c983d-9ff5-40ac-a5a7-4945f350afb3","Type":"ContainerDied","Data":"e7109a321963e7e180ad53ba30bd84581bd60d7fe27696fb6c610e39d20c2010"} Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.612648 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7109a321963e7e180ad53ba30bd84581bd60d7fe27696fb6c610e39d20c2010" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.612668 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-vmqg9" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.915431 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-9m8zr"] Nov 26 15:48:22 crc kubenswrapper[5010]: E1126 15:48:22.916404 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerName="init" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.916495 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerName="init" Nov 26 15:48:22 crc kubenswrapper[5010]: E1126 15:48:22.916561 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerName="dnsmasq-dns" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.916608 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerName="dnsmasq-dns" Nov 26 15:48:22 crc kubenswrapper[5010]: E1126 15:48:22.916684 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c7c983d-9ff5-40ac-a5a7-4945f350afb3" containerName="keystone-db-sync" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.916762 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c7c983d-9ff5-40ac-a5a7-4945f350afb3" containerName="keystone-db-sync" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.917014 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c7c983d-9ff5-40ac-a5a7-4945f350afb3" containerName="keystone-db-sync" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.917090 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="29b615e2-07e5-4456-93e6-1e2a2c5c8a38" containerName="dnsmasq-dns" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.917793 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.920559 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.920647 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.920700 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.920999 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xzc9m" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.927562 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.945225 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75d6d95d77-4vjng"] Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.947068 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.959778 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-9m8zr"] Nov 26 15:48:22 crc kubenswrapper[5010]: I1126 15:48:22.977330 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75d6d95d77-4vjng"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.058961 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-nb\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059028 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-fernet-keys\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059060 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-combined-ca-bundle\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059272 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059307 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-svc\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059337 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6nx6\" (UniqueName: \"kubernetes.io/projected/a364b16e-a34f-4d0f-a657-40dd81c788f9-kube-api-access-x6nx6\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059358 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6wdl\" (UniqueName: \"kubernetes.io/projected/7ddd2fe5-be14-425c-86e9-46c89fac4067-kube-api-access-w6wdl\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059398 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-sb\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: 
\"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059423 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-swift-storage-0\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.059486 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-credential-keys\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.060492 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-scripts\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.060641 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-config-data\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.128004 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.130207 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.134008 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.134045 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.146651 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.162698 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-nb\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.162949 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-fernet-keys\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.163037 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-combined-ca-bundle\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.163111 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.163184 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-svc\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.163282 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6nx6\" (UniqueName: \"kubernetes.io/projected/a364b16e-a34f-4d0f-a657-40dd81c788f9-kube-api-access-x6nx6\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.164434 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-svc\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.164498 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" 
Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.164488 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-nb\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.163774 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6wdl\" (UniqueName: \"kubernetes.io/projected/7ddd2fe5-be14-425c-86e9-46c89fac4067-kube-api-access-w6wdl\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.164804 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-sb\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.164897 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-swift-storage-0\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.165017 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-credential-keys\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.165089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-scripts\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.165155 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-config-data\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.167499 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-swift-storage-0\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.168120 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-sb\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.170638 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-config-data\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.171691 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-combined-ca-bundle\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.181517 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-fernet-keys\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.186999 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6nx6\" (UniqueName: \"kubernetes.io/projected/a364b16e-a34f-4d0f-a657-40dd81c788f9-kube-api-access-x6nx6\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.187047 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-scripts\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.188411 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-credential-keys\") pod \"keystone-bootstrap-9m8zr\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.194032 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-h78d6"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.197434 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6wdl\" (UniqueName: \"kubernetes.io/projected/7ddd2fe5-be14-425c-86e9-46c89fac4067-kube-api-access-w6wdl\") pod \"dnsmasq-dns-75d6d95d77-4vjng\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.198994 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.205593 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.205812 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.206350 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j5x4k" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.213501 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-h78d6"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.268700 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-scripts\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.268791 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-config-data\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.268789 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-hgfkn"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.268835 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.268865 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-run-httpd\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.268967 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rn4k\" (UniqueName: \"kubernetes.io/projected/dd3c6ef7-71bd-4191-b26a-b56464ec9772-kube-api-access-9rn4k\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.269006 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.269041 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-log-httpd\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.269231 5010 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.270365 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.278533 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nq4j6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.278915 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.279090 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.287875 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-x7zvb"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.293058 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.298071 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.314392 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-hgfkn"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.314881 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-v9t4g" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.315159 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.322846 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x7zvb"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.372965 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373032 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-combined-ca-bundle\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373073 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-run-httpd\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373121 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-combined-ca-bundle\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373151 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjzkp\" (UniqueName: \"kubernetes.io/projected/659b75fb-742f-4166-ab4b-e5015d05ccc1-kube-api-access-hjzkp\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373177 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-scripts\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373229 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-config\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373276 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-config-data\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373311 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rn4k\" (UniqueName: \"kubernetes.io/projected/dd3c6ef7-71bd-4191-b26a-b56464ec9772-kube-api-access-9rn4k\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373342 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/659b75fb-742f-4166-ab4b-e5015d05ccc1-etc-machine-id\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373371 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnmqr\" (UniqueName: \"kubernetes.io/projected/647fcd2c-c729-4401-95f8-c38dede33299-kube-api-access-hnmqr\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373421 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373466 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-log-httpd\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373532 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-db-sync-config-data\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373575 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-scripts\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.373614 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-config-data\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.374087 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-run-httpd\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.374247 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-log-httpd\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.395681 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.417046 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.423570 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rn4k\" (UniqueName: \"kubernetes.io/projected/dd3c6ef7-71bd-4191-b26a-b56464ec9772-kube-api-access-9rn4k\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.425079 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-config-data\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.428216 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-scripts\") pod \"ceilometer-0\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.451560 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.476787 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-combined-ca-bundle\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477276 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-combined-ca-bundle\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477316 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-combined-ca-bundle\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477335 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjzkp\" (UniqueName: \"kubernetes.io/projected/659b75fb-742f-4166-ab4b-e5015d05ccc1-kube-api-access-hjzkp\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477361 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-scripts\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477392 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-config\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477423 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-config-data\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477444 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-db-sync-config-data\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477487 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/659b75fb-742f-4166-ab4b-e5015d05ccc1-etc-machine-id\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477506 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hnmqr\" (UniqueName: \"kubernetes.io/projected/647fcd2c-c729-4401-95f8-c38dede33299-kube-api-access-hnmqr\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477536 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mkscq\" (UniqueName: \"kubernetes.io/projected/953ac15c-533c-4abd-ae8b-e5b8108da094-kube-api-access-mkscq\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.477576 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-db-sync-config-data\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.487090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/659b75fb-742f-4166-ab4b-e5015d05ccc1-etc-machine-id\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.489371 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-config-data\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.508792 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-combined-ca-bundle\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.517283 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-combined-ca-bundle\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.529269 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-scripts\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.529676 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-db-sync-config-data\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.541539 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75d6d95d77-4vjng"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.543003 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-hnmqr\" (UniqueName: \"kubernetes.io/projected/647fcd2c-c729-4401-95f8-c38dede33299-kube-api-access-hnmqr\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.543515 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-config\") pod \"neutron-db-sync-h78d6\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.546476 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjzkp\" (UniqueName: \"kubernetes.io/projected/659b75fb-742f-4166-ab4b-e5015d05ccc1-kube-api-access-hjzkp\") pod \"cinder-db-sync-hgfkn\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.583230 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mkscq\" (UniqueName: \"kubernetes.io/projected/953ac15c-533c-4abd-ae8b-e5b8108da094-kube-api-access-mkscq\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.606988 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-combined-ca-bundle\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.607516 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-db-sync-config-data\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.619526 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-db-sync-config-data\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.620434 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-758f67cc8f-8dwzx"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.620960 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mkscq\" (UniqueName: \"kubernetes.io/projected/953ac15c-533c-4abd-ae8b-e5b8108da094-kube-api-access-mkscq\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.622530 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.648761 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-758f67cc8f-8dwzx"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.658575 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-combined-ca-bundle\") pod \"barbican-db-sync-x7zvb\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.679048 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-h78d6" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.711420 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-svc\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.711542 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-nb\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.711571 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqsfl\" (UniqueName: \"kubernetes.io/projected/479407be-a07e-4be6-9a4f-a541ba1090ac-kube-api-access-vqsfl\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.711599 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-swift-storage-0\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.711636 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-config\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.711662 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-sb\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.727681 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.766570 5010 generic.go:334] "Generic (PLEG): container finished" podID="ea113023-3903-4ab3-b036-80328c6ba6ca" containerID="eef72c8ede1b706c5fd5317ea82d316b542309243d7659a4a8f9d540f0aeff1c" exitCode=0 Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.766616 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-p4jwm" event={"ID":"ea113023-3903-4ab3-b036-80328c6ba6ca","Type":"ContainerDied","Data":"eef72c8ede1b706c5fd5317ea82d316b542309243d7659a4a8f9d540f0aeff1c"} Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.818584 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.867380 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-svc\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.867473 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-nb\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.867495 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqsfl\" (UniqueName: \"kubernetes.io/projected/479407be-a07e-4be6-9a4f-a541ba1090ac-kube-api-access-vqsfl\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.867521 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-swift-storage-0\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.867558 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-config\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.867579 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-sb\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.868304 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-svc\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 
15:48:23.868454 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-sb\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.868930 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-nb\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.869217 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-swift-storage-0\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.869446 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-config\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.870066 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-zbphj"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.872824 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.878346 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.878576 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gm9l5" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.879011 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.880158 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-zbphj"] Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.935107 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqsfl\" (UniqueName: \"kubernetes.io/projected/479407be-a07e-4be6-9a4f-a541ba1090ac-kube-api-access-vqsfl\") pod \"dnsmasq-dns-758f67cc8f-8dwzx\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.970093 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-scripts\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.970636 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-config-data\") pod \"placement-db-sync-zbphj\" (UID: 
\"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.970679 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f9a616d-7152-417c-a196-c16c881631c3-logs\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.970738 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwqf7\" (UniqueName: \"kubernetes.io/projected/2f9a616d-7152-417c-a196-c16c881631c3-kube-api-access-bwqf7\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:23 crc kubenswrapper[5010]: I1126 15:48:23.970866 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-combined-ca-bundle\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.010813 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.072350 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-scripts\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.072406 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-config-data\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.072442 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f9a616d-7152-417c-a196-c16c881631c3-logs\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.072462 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwqf7\" (UniqueName: \"kubernetes.io/projected/2f9a616d-7152-417c-a196-c16c881631c3-kube-api-access-bwqf7\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.077658 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f9a616d-7152-417c-a196-c16c881631c3-logs\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.078140 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-combined-ca-bundle\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.078802 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-9m8zr"] Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.080893 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-config-data\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.085900 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-combined-ca-bundle\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.091368 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-scripts\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: W1126 15:48:24.109013 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda364b16e_a34f_4d0f_a657_40dd81c788f9.slice/crio-8f306b6633c8a0b31873a31fcacc4532e3b0419b286f575a28bb98a1ce2b2f96 WatchSource:0}: Error finding container 8f306b6633c8a0b31873a31fcacc4532e3b0419b286f575a28bb98a1ce2b2f96: Status 404 returned error can't find the container with id 8f306b6633c8a0b31873a31fcacc4532e3b0419b286f575a28bb98a1ce2b2f96 Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.120122 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwqf7\" (UniqueName: \"kubernetes.io/projected/2f9a616d-7152-417c-a196-c16c881631c3-kube-api-access-bwqf7\") pod \"placement-db-sync-zbphj\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.328454 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-zbphj" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.335937 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.424295 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75d6d95d77-4vjng"] Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.522068 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-x7zvb"] Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.544432 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-hgfkn"] Nov 26 15:48:24 crc kubenswrapper[5010]: W1126 15:48:24.564988 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod953ac15c_533c_4abd_ae8b_e5b8108da094.slice/crio-c1fc0255c888732ad147bb1b69fdaae5440def359eebc696d414bd1e411a8c3e WatchSource:0}: Error finding container c1fc0255c888732ad147bb1b69fdaae5440def359eebc696d414bd1e411a8c3e: Status 404 returned error can't find the container with id c1fc0255c888732ad147bb1b69fdaae5440def359eebc696d414bd1e411a8c3e Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.619116 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-h78d6"] Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.769973 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-758f67cc8f-8dwzx"] Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.778664 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h78d6" event={"ID":"647fcd2c-c729-4401-95f8-c38dede33299","Type":"ContainerStarted","Data":"7e54ff0925a34b3616affb9a7ec28a97e0c4b237ef51dba9d20cf213f636bef1"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.779768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x7zvb" event={"ID":"953ac15c-533c-4abd-ae8b-e5b8108da094","Type":"ContainerStarted","Data":"c1fc0255c888732ad147bb1b69fdaae5440def359eebc696d414bd1e411a8c3e"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.783412 5010 generic.go:334] "Generic (PLEG): container finished" podID="7ddd2fe5-be14-425c-86e9-46c89fac4067" containerID="6eb905944145231a6a3ec94605060841a6c0a3ea77c075a04d3423e62de0680a" exitCode=0 Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.783476 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" event={"ID":"7ddd2fe5-be14-425c-86e9-46c89fac4067","Type":"ContainerDied","Data":"6eb905944145231a6a3ec94605060841a6c0a3ea77c075a04d3423e62de0680a"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.783497 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" event={"ID":"7ddd2fe5-be14-425c-86e9-46c89fac4067","Type":"ContainerStarted","Data":"731b9d29735d6617b0c1c8344d5d29e9c9f9e057190c9fd7e3d4449bcdf142fc"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.785403 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerStarted","Data":"18aff558ab93214e4696af1b7e166dc365ba95b58a044735dcead1be169b709b"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.786843 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hgfkn" 
event={"ID":"659b75fb-742f-4166-ab4b-e5015d05ccc1","Type":"ContainerStarted","Data":"13d8f970af73f05516922d3baa064778e42699f9b19f0d569110e60bcb8f9139"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.790224 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-9m8zr" event={"ID":"a364b16e-a34f-4d0f-a657-40dd81c788f9","Type":"ContainerStarted","Data":"ed3fcfd8e224708b247b12d8cfd27e62d3ef1808e4f71da44902a4741970224a"} Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.790284 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-9m8zr" event={"ID":"a364b16e-a34f-4d0f-a657-40dd81c788f9","Type":"ContainerStarted","Data":"8f306b6633c8a0b31873a31fcacc4532e3b0419b286f575a28bb98a1ce2b2f96"} Nov 26 15:48:24 crc kubenswrapper[5010]: W1126 15:48:24.827438 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod479407be_a07e_4be6_9a4f_a541ba1090ac.slice/crio-005f6d1ef8454b26931e37cfc06a9a4580a2b91ed141aa335653217bdde2ad3f WatchSource:0}: Error finding container 005f6d1ef8454b26931e37cfc06a9a4580a2b91ed141aa335653217bdde2ad3f: Status 404 returned error can't find the container with id 005f6d1ef8454b26931e37cfc06a9a4580a2b91ed141aa335653217bdde2ad3f Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.847080 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-9m8zr" podStartSLOduration=2.847052955 podStartE2EDuration="2.847052955s" podCreationTimestamp="2025-11-26 15:48:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:48:24.824738597 +0000 UTC m=+1325.615455775" watchObservedRunningTime="2025-11-26 15:48:24.847052955 +0000 UTC m=+1325.637770103" Nov 26 15:48:24 crc kubenswrapper[5010]: I1126 15:48:24.929057 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-zbphj"] Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.227100 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.308916 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-sb\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.309086 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.309132 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-swift-storage-0\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.309153 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-svc\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.309184 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-nb\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.309241 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6wdl\" (UniqueName: \"kubernetes.io/projected/7ddd2fe5-be14-425c-86e9-46c89fac4067-kube-api-access-w6wdl\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.318564 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ddd2fe5-be14-425c-86e9-46c89fac4067-kube-api-access-w6wdl" (OuterVolumeSpecName: "kube-api-access-w6wdl") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "kube-api-access-w6wdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.380579 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.381910 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.387399 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.411362 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config" (OuterVolumeSpecName: "config") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412177 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config\") pod \"7ddd2fe5-be14-425c-86e9-46c89fac4067\" (UID: \"7ddd2fe5-be14-425c-86e9-46c89fac4067\") " Nov 26 15:48:25 crc kubenswrapper[5010]: W1126 15:48:25.412284 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/7ddd2fe5-be14-425c-86e9-46c89fac4067/volumes/kubernetes.io~configmap/config Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412308 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config" (OuterVolumeSpecName: "config") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412846 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412873 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412890 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412903 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6wdl\" (UniqueName: \"kubernetes.io/projected/7ddd2fe5-be14-425c-86e9-46c89fac4067-kube-api-access-w6wdl\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.412913 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.432180 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7ddd2fe5-be14-425c-86e9-46c89fac4067" (UID: "7ddd2fe5-be14-425c-86e9-46c89fac4067"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.493459 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-p4jwm" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.518134 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ddd2fe5-be14-425c-86e9-46c89fac4067-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.619485 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zl9mt\" (UniqueName: \"kubernetes.io/projected/ea113023-3903-4ab3-b036-80328c6ba6ca-kube-api-access-zl9mt\") pod \"ea113023-3903-4ab3-b036-80328c6ba6ca\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.620601 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-db-sync-config-data\") pod \"ea113023-3903-4ab3-b036-80328c6ba6ca\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.620798 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-combined-ca-bundle\") pod \"ea113023-3903-4ab3-b036-80328c6ba6ca\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.621279 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-config-data\") pod \"ea113023-3903-4ab3-b036-80328c6ba6ca\" (UID: \"ea113023-3903-4ab3-b036-80328c6ba6ca\") " Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.624566 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea113023-3903-4ab3-b036-80328c6ba6ca-kube-api-access-zl9mt" (OuterVolumeSpecName: "kube-api-access-zl9mt") pod "ea113023-3903-4ab3-b036-80328c6ba6ca" (UID: "ea113023-3903-4ab3-b036-80328c6ba6ca"). InnerVolumeSpecName "kube-api-access-zl9mt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.625125 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ea113023-3903-4ab3-b036-80328c6ba6ca" (UID: "ea113023-3903-4ab3-b036-80328c6ba6ca"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.654025 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea113023-3903-4ab3-b036-80328c6ba6ca" (UID: "ea113023-3903-4ab3-b036-80328c6ba6ca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.697115 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-config-data" (OuterVolumeSpecName: "config-data") pod "ea113023-3903-4ab3-b036-80328c6ba6ca" (UID: "ea113023-3903-4ab3-b036-80328c6ba6ca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.723689 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.723741 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zl9mt\" (UniqueName: \"kubernetes.io/projected/ea113023-3903-4ab3-b036-80328c6ba6ca-kube-api-access-zl9mt\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.723752 5010 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.723762 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea113023-3903-4ab3-b036-80328c6ba6ca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.810323 5010 generic.go:334] "Generic (PLEG): container finished" podID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerID="b80ac2a8114495aac0c8a48f6d8da27650947451f457da9383120f409ac7ae27" exitCode=0 Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.810409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" event={"ID":"479407be-a07e-4be6-9a4f-a541ba1090ac","Type":"ContainerDied","Data":"b80ac2a8114495aac0c8a48f6d8da27650947451f457da9383120f409ac7ae27"} Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.810438 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" event={"ID":"479407be-a07e-4be6-9a4f-a541ba1090ac","Type":"ContainerStarted","Data":"005f6d1ef8454b26931e37cfc06a9a4580a2b91ed141aa335653217bdde2ad3f"} Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.820322 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h78d6" event={"ID":"647fcd2c-c729-4401-95f8-c38dede33299","Type":"ContainerStarted","Data":"bb62a936b79835b6c73ac07392dd96fd3fc5d2d4ac67dcace4873b04bd1fc9b7"} Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.855581 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" event={"ID":"7ddd2fe5-be14-425c-86e9-46c89fac4067","Type":"ContainerDied","Data":"731b9d29735d6617b0c1c8344d5d29e9c9f9e057190c9fd7e3d4449bcdf142fc"} Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.855639 5010 scope.go:117] "RemoveContainer" containerID="6eb905944145231a6a3ec94605060841a6c0a3ea77c075a04d3423e62de0680a" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.855780 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75d6d95d77-4vjng" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.867660 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zbphj" event={"ID":"2f9a616d-7152-417c-a196-c16c881631c3","Type":"ContainerStarted","Data":"b689b2617fc447f6b4bf3c783190cc85c72b523faf8706b4c0606155c27bc903"} Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.883449 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-p4jwm" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.884399 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-p4jwm" event={"ID":"ea113023-3903-4ab3-b036-80328c6ba6ca","Type":"ContainerDied","Data":"b753596b7b5c9a3f6dce7633b35853bdb3d506105bc09550795f89ffe9cbef2e"} Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.884427 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b753596b7b5c9a3f6dce7633b35853bdb3d506105bc09550795f89ffe9cbef2e" Nov 26 15:48:25 crc kubenswrapper[5010]: I1126 15:48:25.888854 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-h78d6" podStartSLOduration=2.88883385 podStartE2EDuration="2.88883385s" podCreationTimestamp="2025-11-26 15:48:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:48:25.875118027 +0000 UTC m=+1326.665835175" watchObservedRunningTime="2025-11-26 15:48:25.88883385 +0000 UTC m=+1326.679550998" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.064359 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75d6d95d77-4vjng"] Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.064413 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75d6d95d77-4vjng"] Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.553290 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.574099 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-758f67cc8f-8dwzx"] Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.665795 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-667fd8655c-pp657"] Nov 26 15:48:26 crc kubenswrapper[5010]: E1126 15:48:26.666213 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ddd2fe5-be14-425c-86e9-46c89fac4067" containerName="init" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.666227 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ddd2fe5-be14-425c-86e9-46c89fac4067" containerName="init" Nov 26 15:48:26 crc kubenswrapper[5010]: E1126 15:48:26.666268 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea113023-3903-4ab3-b036-80328c6ba6ca" containerName="glance-db-sync" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.666275 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea113023-3903-4ab3-b036-80328c6ba6ca" containerName="glance-db-sync" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.666486 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ddd2fe5-be14-425c-86e9-46c89fac4067" containerName="init" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.666501 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea113023-3903-4ab3-b036-80328c6ba6ca" containerName="glance-db-sync" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.667481 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.695345 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-667fd8655c-pp657"] Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.777359 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-nb\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.777396 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkff9\" (UniqueName: \"kubernetes.io/projected/eccd03a4-6d90-40d9-b371-7e6737f11862-kube-api-access-hkff9\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.777785 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-swift-storage-0\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.778154 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-sb\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.778234 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-svc\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.778271 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-config\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.880151 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-swift-storage-0\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.880261 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-sb\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.880289 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-svc\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.880310 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-config\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.880371 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-nb\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.880390 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkff9\" (UniqueName: \"kubernetes.io/projected/eccd03a4-6d90-40d9-b371-7e6737f11862-kube-api-access-hkff9\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.881516 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-config\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.881516 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-swift-storage-0\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.885386 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-svc\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.885543 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-nb\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.886319 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-sb\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.913210 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkff9\" (UniqueName: 
\"kubernetes.io/projected/eccd03a4-6d90-40d9-b371-7e6737f11862-kube-api-access-hkff9\") pod \"dnsmasq-dns-667fd8655c-pp657\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:26 crc kubenswrapper[5010]: I1126 15:48:26.996816 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.557613 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.559491 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.569968 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-q8pkh" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.570200 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.572448 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.572970 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.595835 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.595878 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-config-data\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.595909 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-scripts\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.595945 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.595962 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwmzj\" (UniqueName: \"kubernetes.io/projected/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-kube-api-access-gwmzj\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.595990 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.596033 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-logs\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.647746 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-667fd8655c-pp657"] Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701369 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-config-data\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701442 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701479 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-scripts\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701546 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701589 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwmzj\" (UniqueName: \"kubernetes.io/projected/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-kube-api-access-gwmzj\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701628 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.701678 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-logs\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 
26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.702389 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.706821 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-logs\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.711972 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.714574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.715603 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-scripts\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.726790 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-config-data\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.827377 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwmzj\" (UniqueName: \"kubernetes.io/projected/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-kube-api-access-gwmzj\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.832617 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.895773 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.912516 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ddd2fe5-be14-425c-86e9-46c89fac4067" path="/var/lib/kubelet/pods/7ddd2fe5-be14-425c-86e9-46c89fac4067/volumes" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.953070 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667fd8655c-pp657" event={"ID":"eccd03a4-6d90-40d9-b371-7e6737f11862","Type":"ContainerStarted","Data":"e4dab88cc5becab8887ad535dbf15caa6ac210319320389600d3dba9897e163c"} Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.961823 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.963692 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.966378 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 15:48:27 crc kubenswrapper[5010]: I1126 15:48:27.990878 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.010419 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.010540 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.010570 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wf52\" (UniqueName: \"kubernetes.io/projected/087ab693-f06c-40a0-8be1-5cf1a923f0a3-kube-api-access-7wf52\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.010596 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-logs\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.010623 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.010950 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.011020 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113031 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113186 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wf52\" (UniqueName: \"kubernetes.io/projected/087ab693-f06c-40a0-8be1-5cf1a923f0a3-kube-api-access-7wf52\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113203 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-logs\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113243 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113361 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113427 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.113950 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: 
\"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.114561 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-logs\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.114645 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.125647 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.126495 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-scripts\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.131535 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7wf52\" (UniqueName: \"kubernetes.io/projected/087ab693-f06c-40a0-8be1-5cf1a923f0a3-kube-api-access-7wf52\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.133900 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-config-data\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.154842 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.348366 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.700899 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:48:28 crc kubenswrapper[5010]: W1126 15:48:28.724466 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaca1bdcb_8331_47b2_8dab_34dc9a73e51b.slice/crio-23cc6d219622e2be78dcd4ff0f61568a8872bffb781da61ed6dbc98814744d43 WatchSource:0}: Error finding container 23cc6d219622e2be78dcd4ff0f61568a8872bffb781da61ed6dbc98814744d43: Status 404 returned error can't find the container with id 23cc6d219622e2be78dcd4ff0f61568a8872bffb781da61ed6dbc98814744d43 Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.974076 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" event={"ID":"479407be-a07e-4be6-9a4f-a541ba1090ac","Type":"ContainerStarted","Data":"4f58005350423aed242028dcb39d262c0f301fe2bd2b708888a300119dde28f2"} Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.974605 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="dnsmasq-dns" containerID="cri-o://4f58005350423aed242028dcb39d262c0f301fe2bd2b708888a300119dde28f2" gracePeriod=10 Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.975476 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:28 crc kubenswrapper[5010]: I1126 15:48:28.991375 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aca1bdcb-8331-47b2-8dab-34dc9a73e51b","Type":"ContainerStarted","Data":"23cc6d219622e2be78dcd4ff0f61568a8872bffb781da61ed6dbc98814744d43"} Nov 26 15:48:29 crc kubenswrapper[5010]: I1126 15:48:29.000539 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667fd8655c-pp657" event={"ID":"eccd03a4-6d90-40d9-b371-7e6737f11862","Type":"ContainerStarted","Data":"2d4cda749f7341b684514100f0579125b1a945f21417bca957f98a5ebaeddb10"} Nov 26 15:48:29 crc kubenswrapper[5010]: I1126 15:48:29.004955 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" podStartSLOduration=6.004931332 podStartE2EDuration="6.004931332s" podCreationTimestamp="2025-11-26 15:48:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:48:28.99926549 +0000 UTC m=+1329.789982658" watchObservedRunningTime="2025-11-26 15:48:29.004931332 +0000 UTC m=+1329.795648480" Nov 26 15:48:29 crc kubenswrapper[5010]: I1126 15:48:29.138926 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:48:29 crc kubenswrapper[5010]: W1126 15:48:29.170913 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod087ab693_f06c_40a0_8be1_5cf1a923f0a3.slice/crio-5992ef1877068647ed502aa48cc47c5c080f61ce0aaf4c5012440da466c9fa2f WatchSource:0}: Error finding container 5992ef1877068647ed502aa48cc47c5c080f61ce0aaf4c5012440da466c9fa2f: Status 404 returned error can't find the container with id 5992ef1877068647ed502aa48cc47c5c080f61ce0aaf4c5012440da466c9fa2f Nov 26 15:48:30 crc 
kubenswrapper[5010]: I1126 15:48:30.021809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"087ab693-f06c-40a0-8be1-5cf1a923f0a3","Type":"ContainerStarted","Data":"5992ef1877068647ed502aa48cc47c5c080f61ce0aaf4c5012440da466c9fa2f"} Nov 26 15:48:31 crc kubenswrapper[5010]: I1126 15:48:31.035605 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aca1bdcb-8331-47b2-8dab-34dc9a73e51b","Type":"ContainerStarted","Data":"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e"} Nov 26 15:48:31 crc kubenswrapper[5010]: I1126 15:48:31.039637 5010 generic.go:334] "Generic (PLEG): container finished" podID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerID="2d4cda749f7341b684514100f0579125b1a945f21417bca957f98a5ebaeddb10" exitCode=0 Nov 26 15:48:31 crc kubenswrapper[5010]: I1126 15:48:31.039698 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667fd8655c-pp657" event={"ID":"eccd03a4-6d90-40d9-b371-7e6737f11862","Type":"ContainerDied","Data":"2d4cda749f7341b684514100f0579125b1a945f21417bca957f98a5ebaeddb10"} Nov 26 15:48:31 crc kubenswrapper[5010]: I1126 15:48:31.045195 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"087ab693-f06c-40a0-8be1-5cf1a923f0a3","Type":"ContainerStarted","Data":"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15"} Nov 26 15:48:31 crc kubenswrapper[5010]: I1126 15:48:31.047808 5010 generic.go:334] "Generic (PLEG): container finished" podID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerID="4f58005350423aed242028dcb39d262c0f301fe2bd2b708888a300119dde28f2" exitCode=0 Nov 26 15:48:31 crc kubenswrapper[5010]: I1126 15:48:31.047847 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" event={"ID":"479407be-a07e-4be6-9a4f-a541ba1090ac","Type":"ContainerDied","Data":"4f58005350423aed242028dcb39d262c0f301fe2bd2b708888a300119dde28f2"} Nov 26 15:48:34 crc kubenswrapper[5010]: I1126 15:48:34.008956 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:48:34 crc kubenswrapper[5010]: I1126 15:48:34.092566 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.712352 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.870234 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-config\") pod \"479407be-a07e-4be6-9a4f-a541ba1090ac\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.870322 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-sb\") pod \"479407be-a07e-4be6-9a4f-a541ba1090ac\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.870363 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-swift-storage-0\") pod \"479407be-a07e-4be6-9a4f-a541ba1090ac\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.870396 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-svc\") pod \"479407be-a07e-4be6-9a4f-a541ba1090ac\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.870465 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-nb\") pod \"479407be-a07e-4be6-9a4f-a541ba1090ac\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.870558 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqsfl\" (UniqueName: \"kubernetes.io/projected/479407be-a07e-4be6-9a4f-a541ba1090ac-kube-api-access-vqsfl\") pod \"479407be-a07e-4be6-9a4f-a541ba1090ac\" (UID: \"479407be-a07e-4be6-9a4f-a541ba1090ac\") " Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.876487 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/479407be-a07e-4be6-9a4f-a541ba1090ac-kube-api-access-vqsfl" (OuterVolumeSpecName: "kube-api-access-vqsfl") pod "479407be-a07e-4be6-9a4f-a541ba1090ac" (UID: "479407be-a07e-4be6-9a4f-a541ba1090ac"). InnerVolumeSpecName "kube-api-access-vqsfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.937477 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-config" (OuterVolumeSpecName: "config") pod "479407be-a07e-4be6-9a4f-a541ba1090ac" (UID: "479407be-a07e-4be6-9a4f-a541ba1090ac"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.938849 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "479407be-a07e-4be6-9a4f-a541ba1090ac" (UID: "479407be-a07e-4be6-9a4f-a541ba1090ac"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.940477 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "479407be-a07e-4be6-9a4f-a541ba1090ac" (UID: "479407be-a07e-4be6-9a4f-a541ba1090ac"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.945192 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "479407be-a07e-4be6-9a4f-a541ba1090ac" (UID: "479407be-a07e-4be6-9a4f-a541ba1090ac"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.948077 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "479407be-a07e-4be6-9a4f-a541ba1090ac" (UID: "479407be-a07e-4be6-9a4f-a541ba1090ac"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.973542 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.973602 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqsfl\" (UniqueName: \"kubernetes.io/projected/479407be-a07e-4be6-9a4f-a541ba1090ac-kube-api-access-vqsfl\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.973617 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.973629 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.973639 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:37 crc kubenswrapper[5010]: I1126 15:48:37.973649 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/479407be-a07e-4be6-9a4f-a541ba1090ac-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:38 crc kubenswrapper[5010]: I1126 15:48:38.137285 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" event={"ID":"479407be-a07e-4be6-9a4f-a541ba1090ac","Type":"ContainerDied","Data":"005f6d1ef8454b26931e37cfc06a9a4580a2b91ed141aa335653217bdde2ad3f"} Nov 26 15:48:38 crc kubenswrapper[5010]: I1126 15:48:38.137356 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" Nov 26 15:48:38 crc kubenswrapper[5010]: I1126 15:48:38.137362 5010 scope.go:117] "RemoveContainer" containerID="4f58005350423aed242028dcb39d262c0f301fe2bd2b708888a300119dde28f2" Nov 26 15:48:38 crc kubenswrapper[5010]: I1126 15:48:38.185966 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-758f67cc8f-8dwzx"] Nov 26 15:48:38 crc kubenswrapper[5010]: I1126 15:48:38.198031 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-758f67cc8f-8dwzx"] Nov 26 15:48:39 crc kubenswrapper[5010]: I1126 15:48:39.013044 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-758f67cc8f-8dwzx" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: i/o timeout" Nov 26 15:48:39 crc kubenswrapper[5010]: I1126 15:48:39.908798 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" path="/var/lib/kubelet/pods/479407be-a07e-4be6-9a4f-a541ba1090ac/volumes" Nov 26 15:48:40 crc kubenswrapper[5010]: I1126 15:48:40.459025 5010 scope.go:117] "RemoveContainer" containerID="b80ac2a8114495aac0c8a48f6d8da27650947451f457da9383120f409ac7ae27" Nov 26 15:48:40 crc kubenswrapper[5010]: E1126 15:48:40.535700 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099" Nov 26 15:48:40 crc kubenswrapper[5010]: E1126 15:48:40.535969 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bwqf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-zbphj_openstack(2f9a616d-7152-417c-a196-c16c881631c3): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:48:40 crc kubenswrapper[5010]: E1126 15:48:40.537273 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-zbphj" podUID="2f9a616d-7152-417c-a196-c16c881631c3" Nov 26 15:48:41 crc kubenswrapper[5010]: I1126 15:48:41.168788 5010 generic.go:334] "Generic (PLEG): container finished" podID="a364b16e-a34f-4d0f-a657-40dd81c788f9" containerID="ed3fcfd8e224708b247b12d8cfd27e62d3ef1808e4f71da44902a4741970224a" exitCode=0 Nov 26 15:48:41 crc kubenswrapper[5010]: I1126 15:48:41.168874 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-9m8zr" event={"ID":"a364b16e-a34f-4d0f-a657-40dd81c788f9","Type":"ContainerDied","Data":"ed3fcfd8e224708b247b12d8cfd27e62d3ef1808e4f71da44902a4741970224a"} Nov 26 15:48:41 crc kubenswrapper[5010]: E1126 15:48:41.171253 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099\\\"\"" pod="openstack/placement-db-sync-zbphj" podUID="2f9a616d-7152-417c-a196-c16c881631c3" Nov 26 15:48:52 crc 
kubenswrapper[5010]: E1126 15:48:52.268898 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140" Nov 26 15:48:52 crc kubenswrapper[5010]: E1126 15:48:52.269499 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n666hd5h57h5f7h96h5bdh68ch59dh5cfh6dhd7h546h57ch5ffh565hdchc9hfbhfdh557h66h99h94h5d8h664h67dh59ch659h65fh664h565h5d7q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9rn4k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(dd3c6ef7-71bd-4191-b26a-b56464ec9772): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:48:52 crc kubenswrapper[5010]: I1126 15:48:52.272231 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.071482 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.205606 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-config-data\") pod \"a364b16e-a34f-4d0f-a657-40dd81c788f9\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.205758 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-combined-ca-bundle\") pod \"a364b16e-a34f-4d0f-a657-40dd81c788f9\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.205866 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-credential-keys\") pod \"a364b16e-a34f-4d0f-a657-40dd81c788f9\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.205972 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x6nx6\" (UniqueName: \"kubernetes.io/projected/a364b16e-a34f-4d0f-a657-40dd81c788f9-kube-api-access-x6nx6\") pod \"a364b16e-a34f-4d0f-a657-40dd81c788f9\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.206014 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-scripts\") pod \"a364b16e-a34f-4d0f-a657-40dd81c788f9\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.206072 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-fernet-keys\") pod \"a364b16e-a34f-4d0f-a657-40dd81c788f9\" (UID: \"a364b16e-a34f-4d0f-a657-40dd81c788f9\") " Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.215080 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-scripts" (OuterVolumeSpecName: "scripts") pod "a364b16e-a34f-4d0f-a657-40dd81c788f9" (UID: "a364b16e-a34f-4d0f-a657-40dd81c788f9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.216208 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a364b16e-a34f-4d0f-a657-40dd81c788f9-kube-api-access-x6nx6" (OuterVolumeSpecName: "kube-api-access-x6nx6") pod "a364b16e-a34f-4d0f-a657-40dd81c788f9" (UID: "a364b16e-a34f-4d0f-a657-40dd81c788f9"). InnerVolumeSpecName "kube-api-access-x6nx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.218637 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a364b16e-a34f-4d0f-a657-40dd81c788f9" (UID: "a364b16e-a34f-4d0f-a657-40dd81c788f9"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.220853 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a364b16e-a34f-4d0f-a657-40dd81c788f9" (UID: "a364b16e-a34f-4d0f-a657-40dd81c788f9"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.244554 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-config-data" (OuterVolumeSpecName: "config-data") pod "a364b16e-a34f-4d0f-a657-40dd81c788f9" (UID: "a364b16e-a34f-4d0f-a657-40dd81c788f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.245925 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a364b16e-a34f-4d0f-a657-40dd81c788f9" (UID: "a364b16e-a34f-4d0f-a657-40dd81c788f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.299988 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-9m8zr" event={"ID":"a364b16e-a34f-4d0f-a657-40dd81c788f9","Type":"ContainerDied","Data":"8f306b6633c8a0b31873a31fcacc4532e3b0419b286f575a28bb98a1ce2b2f96"} Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.300103 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f306b6633c8a0b31873a31fcacc4532e3b0419b286f575a28bb98a1ce2b2f96" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.300197 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-9m8zr" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.308694 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.308741 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.308752 5010 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.308763 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x6nx6\" (UniqueName: \"kubernetes.io/projected/a364b16e-a34f-4d0f-a657-40dd81c788f9-kube-api-access-x6nx6\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.308777 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:54 crc kubenswrapper[5010]: I1126 15:48:54.308787 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a364b16e-a34f-4d0f-a657-40dd81c788f9-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.150359 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-9m8zr"] Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.159279 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-9m8zr"] Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.269739 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-58b6s"] Nov 26 15:48:55 crc kubenswrapper[5010]: E1126 15:48:55.270138 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="init" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.270153 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="init" Nov 26 15:48:55 crc kubenswrapper[5010]: E1126 15:48:55.270180 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a364b16e-a34f-4d0f-a657-40dd81c788f9" containerName="keystone-bootstrap" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.270188 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a364b16e-a34f-4d0f-a657-40dd81c788f9" containerName="keystone-bootstrap" Nov 26 15:48:55 crc kubenswrapper[5010]: E1126 15:48:55.270218 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="dnsmasq-dns" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.270227 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="dnsmasq-dns" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.270439 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="479407be-a07e-4be6-9a4f-a541ba1090ac" containerName="dnsmasq-dns" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 
15:48:55.270466 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a364b16e-a34f-4d0f-a657-40dd81c788f9" containerName="keystone-bootstrap" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.271147 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.274851 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.274912 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.275642 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.275972 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xzc9m" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.276827 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.285563 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-58b6s"] Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.331082 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-combined-ca-bundle\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.331150 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-scripts\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.331209 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-fernet-keys\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.331257 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-credential-keys\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.331282 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-config-data\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.331345 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tggw\" (UniqueName: 
\"kubernetes.io/projected/56eb624a-00e8-476d-b468-aa83bc64faad-kube-api-access-9tggw\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.432946 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-combined-ca-bundle\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.433006 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-scripts\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.433055 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-fernet-keys\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.433090 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-credential-keys\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.433112 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-config-data\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.433163 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tggw\" (UniqueName: \"kubernetes.io/projected/56eb624a-00e8-476d-b468-aa83bc64faad-kube-api-access-9tggw\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.440970 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-fernet-keys\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.441063 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-config-data\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.441846 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-combined-ca-bundle\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " 
pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.444624 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-credential-keys\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.444863 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-scripts\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.450424 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tggw\" (UniqueName: \"kubernetes.io/projected/56eb624a-00e8-476d-b468-aa83bc64faad-kube-api-access-9tggw\") pod \"keystone-bootstrap-58b6s\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.590639 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:48:55 crc kubenswrapper[5010]: I1126 15:48:55.911361 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a364b16e-a34f-4d0f-a657-40dd81c788f9" path="/var/lib/kubelet/pods/a364b16e-a34f-4d0f-a657-40dd81c788f9/volumes" Nov 26 15:48:58 crc kubenswrapper[5010]: E1126 15:48:58.733273 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879" Nov 26 15:48:58 crc kubenswrapper[5010]: E1126 15:48:58.734313 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hjzkp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-hgfkn_openstack(659b75fb-742f-4166-ab4b-e5015d05ccc1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:48:58 crc kubenswrapper[5010]: E1126 15:48:58.735689 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-hgfkn" podUID="659b75fb-742f-4166-ab4b-e5015d05ccc1" Nov 26 15:48:59 crc kubenswrapper[5010]: E1126 15:48:59.205290 5010 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645" Nov 26 15:48:59 crc kubenswrapper[5010]: E1126 15:48:59.205444 5010 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mkscq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-x7zvb_openstack(953ac15c-533c-4abd-ae8b-e5b8108da094): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 26 15:48:59 crc kubenswrapper[5010]: E1126 15:48:59.206967 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-x7zvb" podUID="953ac15c-533c-4abd-ae8b-e5b8108da094" Nov 26 15:48:59 crc kubenswrapper[5010]: E1126 15:48:59.357794 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879\\\"\"" pod="openstack/cinder-db-sync-hgfkn" podUID="659b75fb-742f-4166-ab4b-e5015d05ccc1" Nov 26 15:48:59 crc kubenswrapper[5010]: E1126 15:48:59.359024 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645\\\"\"" pod="openstack/barbican-db-sync-x7zvb" podUID="953ac15c-533c-4abd-ae8b-e5b8108da094" Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.185390 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-58b6s"] Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.367491 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerStarted","Data":"b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5"} Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.371905 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667fd8655c-pp657" 
event={"ID":"eccd03a4-6d90-40d9-b371-7e6737f11862","Type":"ContainerStarted","Data":"029c55062d788ba70d12a95d7deb331ec9ef320512ee7519131e8ba85e747280"} Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.371985 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.377366 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"087ab693-f06c-40a0-8be1-5cf1a923f0a3","Type":"ContainerStarted","Data":"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc"} Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.377547 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-log" containerID="cri-o://e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15" gracePeriod=30 Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.377817 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-httpd" containerID="cri-o://3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc" gracePeriod=30 Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.384106 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-58b6s" event={"ID":"56eb624a-00e8-476d-b468-aa83bc64faad","Type":"ContainerStarted","Data":"49534570a907319f553695e595c01f0f37885a56d6483f028a15bd13ed49881a"} Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.388143 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zbphj" event={"ID":"2f9a616d-7152-417c-a196-c16c881631c3","Type":"ContainerStarted","Data":"333b57ad505174d4546e1edf9a1b810ad396148113c96a3f290db461a21adfb1"} Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.449741 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-667fd8655c-pp657" podStartSLOduration=34.449696713 podStartE2EDuration="34.449696713s" podCreationTimestamp="2025-11-26 15:48:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:00.391238071 +0000 UTC m=+1361.181955239" watchObservedRunningTime="2025-11-26 15:49:00.449696713 +0000 UTC m=+1361.240413861" Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.453955 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aca1bdcb-8331-47b2-8dab-34dc9a73e51b","Type":"ContainerStarted","Data":"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e"} Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.454021 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-log" containerID="cri-o://c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e" gracePeriod=30 Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.454383 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-httpd" 
containerID="cri-o://0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e" gracePeriod=30 Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.462907 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-zbphj" podStartSLOduration=2.6960187639999997 podStartE2EDuration="37.462887043s" podCreationTimestamp="2025-11-26 15:48:23 +0000 UTC" firstStartedPulling="2025-11-26 15:48:24.951242512 +0000 UTC m=+1325.741959660" lastFinishedPulling="2025-11-26 15:48:59.718110791 +0000 UTC m=+1360.508827939" observedRunningTime="2025-11-26 15:49:00.424183025 +0000 UTC m=+1361.214900183" watchObservedRunningTime="2025-11-26 15:49:00.462887043 +0000 UTC m=+1361.253604191" Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.484854 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=34.484819062 podStartE2EDuration="34.484819062s" podCreationTimestamp="2025-11-26 15:48:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:00.452500103 +0000 UTC m=+1361.243217251" watchObservedRunningTime="2025-11-26 15:49:00.484819062 +0000 UTC m=+1361.275536210" Nov 26 15:49:00 crc kubenswrapper[5010]: I1126 15:49:00.514370 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=34.51434899 podStartE2EDuration="34.51434899s" podCreationTimestamp="2025-11-26 15:48:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:00.48156337 +0000 UTC m=+1361.272280518" watchObservedRunningTime="2025-11-26 15:49:00.51434899 +0000 UTC m=+1361.305066138" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.008362 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070410 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-config-data\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070483 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-scripts\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070595 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7wf52\" (UniqueName: \"kubernetes.io/projected/087ab693-f06c-40a0-8be1-5cf1a923f0a3-kube-api-access-7wf52\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070634 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-combined-ca-bundle\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070790 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-logs\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070817 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.070935 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-httpd-run\") pod \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\" (UID: \"087ab693-f06c-40a0-8be1-5cf1a923f0a3\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.072345 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-logs" (OuterVolumeSpecName: "logs") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.072894 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.079361 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-scripts" (OuterVolumeSpecName: "scripts") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.084139 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.084998 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/087ab693-f06c-40a0-8be1-5cf1a923f0a3-kube-api-access-7wf52" (OuterVolumeSpecName: "kube-api-access-7wf52") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "kube-api-access-7wf52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.106157 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.123100 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.139469 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-config-data" (OuterVolumeSpecName: "config-data") pod "087ab693-f06c-40a0-8be1-5cf1a923f0a3" (UID: "087ab693-f06c-40a0-8be1-5cf1a923f0a3"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.172100 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.172200 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-httpd-run\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.172255 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-logs\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.172685 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-combined-ca-bundle\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.172622 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-logs" (OuterVolumeSpecName: "logs") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.172776 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-scripts\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.173148 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.173327 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-config-data\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.173405 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gwmzj\" (UniqueName: \"kubernetes.io/projected/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-kube-api-access-gwmzj\") pod \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\" (UID: \"aca1bdcb-8331-47b2-8dab-34dc9a73e51b\") " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.173997 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174022 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174035 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174048 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wf52\" (UniqueName: \"kubernetes.io/projected/087ab693-f06c-40a0-8be1-5cf1a923f0a3-kube-api-access-7wf52\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174062 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/087ab693-f06c-40a0-8be1-5cf1a923f0a3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174072 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174108 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174124 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/087ab693-f06c-40a0-8be1-5cf1a923f0a3-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.174134 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.177524 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-kube-api-access-gwmzj" (OuterVolumeSpecName: "kube-api-access-gwmzj") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "kube-api-access-gwmzj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.178135 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-scripts" (OuterVolumeSpecName: "scripts") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.179176 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.197281 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.197657 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.219638 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-config-data" (OuterVolumeSpecName: "config-data") pod "aca1bdcb-8331-47b2-8dab-34dc9a73e51b" (UID: "aca1bdcb-8331-47b2-8dab-34dc9a73e51b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.282136 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.282170 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gwmzj\" (UniqueName: \"kubernetes.io/projected/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-kube-api-access-gwmzj\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.282181 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.282217 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.282227 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.282238 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/aca1bdcb-8331-47b2-8dab-34dc9a73e51b-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.300789 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.384243 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.466966 5010 generic.go:334] "Generic (PLEG): container finished" podID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerID="0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e" exitCode=0 Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.467299 5010 generic.go:334] "Generic (PLEG): container finished" podID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerID="c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e" exitCode=143 Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.467087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aca1bdcb-8331-47b2-8dab-34dc9a73e51b","Type":"ContainerDied","Data":"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.467060 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.467842 5010 scope.go:117] "RemoveContainer" containerID="0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.467748 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aca1bdcb-8331-47b2-8dab-34dc9a73e51b","Type":"ContainerDied","Data":"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.468284 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"aca1bdcb-8331-47b2-8dab-34dc9a73e51b","Type":"ContainerDied","Data":"23cc6d219622e2be78dcd4ff0f61568a8872bffb781da61ed6dbc98814744d43"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.479733 5010 generic.go:334] "Generic (PLEG): container finished" podID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerID="3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc" exitCode=0 Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.479774 5010 generic.go:334] "Generic (PLEG): container finished" podID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerID="e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15" exitCode=143 Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.479953 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"087ab693-f06c-40a0-8be1-5cf1a923f0a3","Type":"ContainerDied","Data":"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.480004 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"087ab693-f06c-40a0-8be1-5cf1a923f0a3","Type":"ContainerDied","Data":"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.480022 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"087ab693-f06c-40a0-8be1-5cf1a923f0a3","Type":"ContainerDied","Data":"5992ef1877068647ed502aa48cc47c5c080f61ce0aaf4c5012440da466c9fa2f"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.479969 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.484127 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-58b6s" event={"ID":"56eb624a-00e8-476d-b468-aa83bc64faad","Type":"ContainerStarted","Data":"16c3ef01ba3fcc61ab3efc7446b80803148d81f97d6e11a28601111c5763f722"} Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.521434 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-58b6s" podStartSLOduration=6.521408972 podStartE2EDuration="6.521408972s" podCreationTimestamp="2025-11-26 15:48:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:01.505882054 +0000 UTC m=+1362.296599212" watchObservedRunningTime="2025-11-26 15:49:01.521408972 +0000 UTC m=+1362.312126140" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.523541 5010 scope.go:117] "RemoveContainer" containerID="c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.532895 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.549062 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.555608 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.563810 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570285 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.570654 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-log" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570670 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-log" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.570691 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-log" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570697 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-log" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.570725 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-httpd" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570734 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-httpd" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.570763 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-httpd" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570771 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-httpd" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 
15:49:01.570945 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-httpd" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570968 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-log" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570980 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" containerName="glance-log" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.570995 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" containerName="glance-httpd" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.571933 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.581803 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.582012 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-q8pkh" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.585394 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.585928 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.591991 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.593606 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.599970 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.632155 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.632595 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.632870 5010 scope.go:117] "RemoveContainer" containerID="0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.636057 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e\": container with ID starting with 0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e not found: ID does not exist" containerID="0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.636094 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e"} err="failed to get container status \"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e\": rpc error: code = NotFound desc = could not find container \"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e\": container with ID starting with 0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.636122 5010 scope.go:117] "RemoveContainer" containerID="c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.637162 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e\": container with ID starting with c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e not found: ID does not exist" containerID="c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.637182 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e"} err="failed to get container status \"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e\": rpc error: code = NotFound desc = could not find container \"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e\": container with ID starting with c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.637195 5010 scope.go:117] "RemoveContainer" containerID="0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.637578 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e"} err="failed to get container status 
\"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e\": rpc error: code = NotFound desc = could not find container \"0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e\": container with ID starting with 0c5d29317ad21d6cc429dc3372b3c2c123ecd661ef507873db7ef9ae61c56b3e not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.637615 5010 scope.go:117] "RemoveContainer" containerID="c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.637956 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e"} err="failed to get container status \"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e\": rpc error: code = NotFound desc = could not find container \"c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e\": container with ID starting with c6d5b7ba64eafa28164d89f4d4652cef0bcf9fdd1526500ef8ca7c7e4da37e0e not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.637974 5010 scope.go:117] "RemoveContainer" containerID="3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.668129 5010 scope.go:117] "RemoveContainer" containerID="e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691698 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691756 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691809 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691858 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691875 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-logs\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691891 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691908 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-config-data\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691932 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691955 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.691972 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4trk6\" (UniqueName: \"kubernetes.io/projected/13084c10-bc6a-48a7-8624-a405f5d06e3d-kube-api-access-4trk6\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.692011 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.692294 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-logs\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.692368 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7mqk\" (UniqueName: \"kubernetes.io/projected/b4496d49-7b88-4c60-9fd5-fe0608f52b13-kube-api-access-z7mqk\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.692404 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-scripts\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " 
pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.692439 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.692469 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.693308 5010 scope.go:117] "RemoveContainer" containerID="3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.693879 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc\": container with ID starting with 3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc not found: ID does not exist" containerID="3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.693924 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc"} err="failed to get container status \"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc\": rpc error: code = NotFound desc = could not find container \"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc\": container with ID starting with 3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.693955 5010 scope.go:117] "RemoveContainer" containerID="e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15" Nov 26 15:49:01 crc kubenswrapper[5010]: E1126 15:49:01.694263 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15\": container with ID starting with e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15 not found: ID does not exist" containerID="e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.694280 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15"} err="failed to get container status \"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15\": rpc error: code = NotFound desc = could not find container \"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15\": container with ID starting with e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15 not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.694294 5010 scope.go:117] "RemoveContainer" containerID="3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.694376 5010 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.694612 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc"} err="failed to get container status \"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc\": rpc error: code = NotFound desc = could not find container \"3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc\": container with ID starting with 3ae9244013866d4fcd1d71228fbaca10a6fa7aafc1bec12dc40e92e1943bdedc not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.694629 5010 scope.go:117] "RemoveContainer" containerID="e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.695016 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15"} err="failed to get container status \"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15\": rpc error: code = NotFound desc = could not find container \"e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15\": container with ID starting with e72211ab8a87159cfe5d5456b8b75e5b554cc97fbfcb18f245f35e5b3d4f0f15 not found: ID does not exist" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.794727 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7mqk\" (UniqueName: \"kubernetes.io/projected/b4496d49-7b88-4c60-9fd5-fe0608f52b13-kube-api-access-z7mqk\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.794794 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-scripts\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.794833 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.794876 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.794924 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.794984 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795112 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795221 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795247 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-logs\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795258 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795270 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795575 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-config-data\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795659 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795757 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795799 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4trk6\" (UniqueName: 
\"kubernetes.io/projected/13084c10-bc6a-48a7-8624-a405f5d06e3d-kube-api-access-4trk6\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.795964 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.796037 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-logs\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.796205 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.796700 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.800681 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-scripts\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.801093 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-logs\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.801179 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.801457 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-logs\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.806799 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " 
pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.810576 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.811732 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.815104 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.822229 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-config-data\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.825172 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7mqk\" (UniqueName: \"kubernetes.io/projected/b4496d49-7b88-4c60-9fd5-fe0608f52b13-kube-api-access-z7mqk\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.826680 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4trk6\" (UniqueName: \"kubernetes.io/projected/13084c10-bc6a-48a7-8624-a405f5d06e3d-kube-api-access-4trk6\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.838033 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.853167 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.859026 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 
15:49:01.878066 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.934259 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="087ab693-f06c-40a0-8be1-5cf1a923f0a3" path="/var/lib/kubelet/pods/087ab693-f06c-40a0-8be1-5cf1a923f0a3/volumes" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.935182 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aca1bdcb-8331-47b2-8dab-34dc9a73e51b" path="/var/lib/kubelet/pods/aca1bdcb-8331-47b2-8dab-34dc9a73e51b/volumes" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.965234 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:49:01 crc kubenswrapper[5010]: I1126 15:49:01.973876 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:02 crc kubenswrapper[5010]: I1126 15:49:02.643059 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:49:02 crc kubenswrapper[5010]: W1126 15:49:02.673937 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13084c10_bc6a_48a7_8624_a405f5d06e3d.slice/crio-5296c4177f4329f3dce5f739eaeaff11a945ce94eea38f794ad51f8670aecf3b WatchSource:0}: Error finding container 5296c4177f4329f3dce5f739eaeaff11a945ce94eea38f794ad51f8670aecf3b: Status 404 returned error can't find the container with id 5296c4177f4329f3dce5f739eaeaff11a945ce94eea38f794ad51f8670aecf3b Nov 26 15:49:03 crc kubenswrapper[5010]: I1126 15:49:03.476115 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:49:03 crc kubenswrapper[5010]: I1126 15:49:03.523016 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"13084c10-bc6a-48a7-8624-a405f5d06e3d","Type":"ContainerStarted","Data":"827c329ecf0f42138145a53ff8287287931b6a87e623afc3834445f7de0125e6"} Nov 26 15:49:03 crc kubenswrapper[5010]: I1126 15:49:03.523080 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"13084c10-bc6a-48a7-8624-a405f5d06e3d","Type":"ContainerStarted","Data":"5296c4177f4329f3dce5f739eaeaff11a945ce94eea38f794ad51f8670aecf3b"} Nov 26 15:49:03 crc kubenswrapper[5010]: I1126 15:49:03.537052 5010 generic.go:334] "Generic (PLEG): container finished" podID="2f9a616d-7152-417c-a196-c16c881631c3" containerID="333b57ad505174d4546e1edf9a1b810ad396148113c96a3f290db461a21adfb1" exitCode=0 Nov 26 15:49:03 crc kubenswrapper[5010]: I1126 15:49:03.537116 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zbphj" event={"ID":"2f9a616d-7152-417c-a196-c16c881631c3","Type":"ContainerDied","Data":"333b57ad505174d4546e1edf9a1b810ad396148113c96a3f290db461a21adfb1"} Nov 26 15:49:04 crc kubenswrapper[5010]: I1126 15:49:04.555393 5010 generic.go:334] "Generic (PLEG): container finished" podID="56eb624a-00e8-476d-b468-aa83bc64faad" containerID="16c3ef01ba3fcc61ab3efc7446b80803148d81f97d6e11a28601111c5763f722" exitCode=0 Nov 26 15:49:04 crc kubenswrapper[5010]: I1126 
15:49:04.556043 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-58b6s" event={"ID":"56eb624a-00e8-476d-b468-aa83bc64faad","Type":"ContainerDied","Data":"16c3ef01ba3fcc61ab3efc7446b80803148d81f97d6e11a28601111c5763f722"} Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.341673 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-zbphj" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.399388 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwqf7\" (UniqueName: \"kubernetes.io/projected/2f9a616d-7152-417c-a196-c16c881631c3-kube-api-access-bwqf7\") pod \"2f9a616d-7152-417c-a196-c16c881631c3\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.399562 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-scripts\") pod \"2f9a616d-7152-417c-a196-c16c881631c3\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.399606 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f9a616d-7152-417c-a196-c16c881631c3-logs\") pod \"2f9a616d-7152-417c-a196-c16c881631c3\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.399640 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-config-data\") pod \"2f9a616d-7152-417c-a196-c16c881631c3\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.400371 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9a616d-7152-417c-a196-c16c881631c3-logs" (OuterVolumeSpecName: "logs") pod "2f9a616d-7152-417c-a196-c16c881631c3" (UID: "2f9a616d-7152-417c-a196-c16c881631c3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.400580 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-combined-ca-bundle\") pod \"2f9a616d-7152-417c-a196-c16c881631c3\" (UID: \"2f9a616d-7152-417c-a196-c16c881631c3\") " Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.401864 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2f9a616d-7152-417c-a196-c16c881631c3-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.407396 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f9a616d-7152-417c-a196-c16c881631c3-kube-api-access-bwqf7" (OuterVolumeSpecName: "kube-api-access-bwqf7") pod "2f9a616d-7152-417c-a196-c16c881631c3" (UID: "2f9a616d-7152-417c-a196-c16c881631c3"). InnerVolumeSpecName "kube-api-access-bwqf7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.443030 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-scripts" (OuterVolumeSpecName: "scripts") pod "2f9a616d-7152-417c-a196-c16c881631c3" (UID: "2f9a616d-7152-417c-a196-c16c881631c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.459372 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-config-data" (OuterVolumeSpecName: "config-data") pod "2f9a616d-7152-417c-a196-c16c881631c3" (UID: "2f9a616d-7152-417c-a196-c16c881631c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.463975 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f9a616d-7152-417c-a196-c16c881631c3" (UID: "2f9a616d-7152-417c-a196-c16c881631c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.503294 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.503897 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.503961 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f9a616d-7152-417c-a196-c16c881631c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.504043 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwqf7\" (UniqueName: \"kubernetes.io/projected/2f9a616d-7152-417c-a196-c16c881631c3-kube-api-access-bwqf7\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.570826 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-zbphj" event={"ID":"2f9a616d-7152-417c-a196-c16c881631c3","Type":"ContainerDied","Data":"b689b2617fc447f6b4bf3c783190cc85c72b523faf8706b4c0606155c27bc903"} Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.570882 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b689b2617fc447f6b4bf3c783190cc85c72b523faf8706b4c0606155c27bc903" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.570849 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-zbphj" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.690667 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-546d9f9b4-87p6s"] Nov 26 15:49:05 crc kubenswrapper[5010]: E1126 15:49:05.692001 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9a616d-7152-417c-a196-c16c881631c3" containerName="placement-db-sync" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.692095 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9a616d-7152-417c-a196-c16c881631c3" containerName="placement-db-sync" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.692378 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f9a616d-7152-417c-a196-c16c881631c3" containerName="placement-db-sync" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.693795 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.697085 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.697094 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.697093 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.697772 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gm9l5" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.697781 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.714660 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-546d9f9b4-87p6s"] Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.828223 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-internal-tls-certs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.828322 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-public-tls-certs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.828381 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-scripts\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.828541 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9651251a-a0b2-4db8-bb82-b22a707bd7ab-logs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") 
" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.828775 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nn9td\" (UniqueName: \"kubernetes.io/projected/9651251a-a0b2-4db8-bb82-b22a707bd7ab-kube-api-access-nn9td\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.829148 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-combined-ca-bundle\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.829332 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-config-data\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.930855 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9651251a-a0b2-4db8-bb82-b22a707bd7ab-logs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.931285 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nn9td\" (UniqueName: \"kubernetes.io/projected/9651251a-a0b2-4db8-bb82-b22a707bd7ab-kube-api-access-nn9td\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.931395 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-combined-ca-bundle\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.931430 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-config-data\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.931482 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-internal-tls-certs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.931532 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-public-tls-certs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" 
Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.931574 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-scripts\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.933052 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9651251a-a0b2-4db8-bb82-b22a707bd7ab-logs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.938787 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-combined-ca-bundle\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.938942 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-scripts\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.939861 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-config-data\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.940396 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-internal-tls-certs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.945681 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-public-tls-certs\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:05 crc kubenswrapper[5010]: I1126 15:49:05.950561 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nn9td\" (UniqueName: \"kubernetes.io/projected/9651251a-a0b2-4db8-bb82-b22a707bd7ab-kube-api-access-nn9td\") pod \"placement-546d9f9b4-87p6s\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:06 crc kubenswrapper[5010]: I1126 15:49:06.017163 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:07 crc kubenswrapper[5010]: I1126 15:49:06.999647 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:49:07 crc kubenswrapper[5010]: I1126 15:49:07.073798 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-684f7c765c-ssxjs"] Nov 26 15:49:07 crc kubenswrapper[5010]: I1126 15:49:07.074303 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerName="dnsmasq-dns" containerID="cri-o://d07adbf8132600c4f8b5f7ca9c691c8503c55f456d7270b7060a9c6d3dfafa76" gracePeriod=10 Nov 26 15:49:07 crc kubenswrapper[5010]: I1126 15:49:07.121265 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.138:5353: connect: connection refused" Nov 26 15:49:07 crc kubenswrapper[5010]: I1126 15:49:07.598173 5010 generic.go:334] "Generic (PLEG): container finished" podID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerID="d07adbf8132600c4f8b5f7ca9c691c8503c55f456d7270b7060a9c6d3dfafa76" exitCode=0 Nov 26 15:49:07 crc kubenswrapper[5010]: I1126 15:49:07.598221 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" event={"ID":"673c6759-4cfb-4eb1-9e13-81b318c258ff","Type":"ContainerDied","Data":"d07adbf8132600c4f8b5f7ca9c691c8503c55f456d7270b7060a9c6d3dfafa76"} Nov 26 15:49:08 crc kubenswrapper[5010]: W1126 15:49:08.473868 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4496d49_7b88_4c60_9fd5_fe0608f52b13.slice/crio-b529a7981f6f67cb8a08573e1393b0118bc3ca94b7b7eb6bc05ba8c121157c90 WatchSource:0}: Error finding container b529a7981f6f67cb8a08573e1393b0118bc3ca94b7b7eb6bc05ba8c121157c90: Status 404 returned error can't find the container with id b529a7981f6f67cb8a08573e1393b0118bc3ca94b7b7eb6bc05ba8c121157c90 Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.615645 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-58b6s" event={"ID":"56eb624a-00e8-476d-b468-aa83bc64faad","Type":"ContainerDied","Data":"49534570a907319f553695e595c01f0f37885a56d6483f028a15bd13ed49881a"} Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.615733 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49534570a907319f553695e595c01f0f37885a56d6483f028a15bd13ed49881a" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.618917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4496d49-7b88-4c60-9fd5-fe0608f52b13","Type":"ContainerStarted","Data":"b529a7981f6f67cb8a08573e1393b0118bc3ca94b7b7eb6bc05ba8c121157c90"} Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.674497 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.793623 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-credential-keys\") pod \"56eb624a-00e8-476d-b468-aa83bc64faad\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.793683 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-config-data\") pod \"56eb624a-00e8-476d-b468-aa83bc64faad\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.793794 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-fernet-keys\") pod \"56eb624a-00e8-476d-b468-aa83bc64faad\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.793854 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tggw\" (UniqueName: \"kubernetes.io/projected/56eb624a-00e8-476d-b468-aa83bc64faad-kube-api-access-9tggw\") pod \"56eb624a-00e8-476d-b468-aa83bc64faad\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.794982 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-scripts\") pod \"56eb624a-00e8-476d-b468-aa83bc64faad\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.795120 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-combined-ca-bundle\") pod \"56eb624a-00e8-476d-b468-aa83bc64faad\" (UID: \"56eb624a-00e8-476d-b468-aa83bc64faad\") " Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.801880 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-scripts" (OuterVolumeSpecName: "scripts") pod "56eb624a-00e8-476d-b468-aa83bc64faad" (UID: "56eb624a-00e8-476d-b468-aa83bc64faad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.804239 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "56eb624a-00e8-476d-b468-aa83bc64faad" (UID: "56eb624a-00e8-476d-b468-aa83bc64faad"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.804297 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "56eb624a-00e8-476d-b468-aa83bc64faad" (UID: "56eb624a-00e8-476d-b468-aa83bc64faad"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.804411 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56eb624a-00e8-476d-b468-aa83bc64faad-kube-api-access-9tggw" (OuterVolumeSpecName: "kube-api-access-9tggw") pod "56eb624a-00e8-476d-b468-aa83bc64faad" (UID: "56eb624a-00e8-476d-b468-aa83bc64faad"). InnerVolumeSpecName "kube-api-access-9tggw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.836242 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "56eb624a-00e8-476d-b468-aa83bc64faad" (UID: "56eb624a-00e8-476d-b468-aa83bc64faad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.856525 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-config-data" (OuterVolumeSpecName: "config-data") pod "56eb624a-00e8-476d-b468-aa83bc64faad" (UID: "56eb624a-00e8-476d-b468-aa83bc64faad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.897473 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.897547 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tggw\" (UniqueName: \"kubernetes.io/projected/56eb624a-00e8-476d-b468-aa83bc64faad-kube-api-access-9tggw\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.897558 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.897589 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.897599 5010 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.897606 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/56eb624a-00e8-476d-b468-aa83bc64faad-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:08 crc kubenswrapper[5010]: I1126 15:49:08.976857 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-546d9f9b4-87p6s"] Nov 26 15:49:08 crc kubenswrapper[5010]: W1126 15:49:08.990587 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9651251a_a0b2_4db8_bb82_b22a707bd7ab.slice/crio-7988dac9d01369a5dd02344b2c07e802c64f5747e521359b11858e905402a6fc WatchSource:0}: Error finding container 
7988dac9d01369a5dd02344b2c07e802c64f5747e521359b11858e905402a6fc: Status 404 returned error can't find the container with id 7988dac9d01369a5dd02344b2c07e802c64f5747e521359b11858e905402a6fc Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.633731 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-546d9f9b4-87p6s" event={"ID":"9651251a-a0b2-4db8-bb82-b22a707bd7ab","Type":"ContainerStarted","Data":"7988dac9d01369a5dd02344b2c07e802c64f5747e521359b11858e905402a6fc"} Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.636404 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-58b6s" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.868556 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-7b9b5b699d-rh4fw"] Nov 26 15:49:09 crc kubenswrapper[5010]: E1126 15:49:09.869028 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56eb624a-00e8-476d-b468-aa83bc64faad" containerName="keystone-bootstrap" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.869042 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="56eb624a-00e8-476d-b468-aa83bc64faad" containerName="keystone-bootstrap" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.869242 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="56eb624a-00e8-476d-b468-aa83bc64faad" containerName="keystone-bootstrap" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.869881 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.878799 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-xzc9m" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.879141 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.879420 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.879767 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.879990 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.880277 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 26 15:49:09 crc kubenswrapper[5010]: I1126 15:49:09.886308 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7b9b5b699d-rh4fw"] Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.018577 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-internal-tls-certs\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.018664 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-config-data\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " 
pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.018806 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-fernet-keys\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.018844 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-public-tls-certs\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.018901 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-combined-ca-bundle\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.018936 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-scripts\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.019225 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6p6p2\" (UniqueName: \"kubernetes.io/projected/d6093731-a529-4e5b-94bd-4948ab30cedc-kube-api-access-6p6p2\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.019308 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-credential-keys\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.120932 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6p6p2\" (UniqueName: \"kubernetes.io/projected/d6093731-a529-4e5b-94bd-4948ab30cedc-kube-api-access-6p6p2\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.120997 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-credential-keys\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.121082 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-internal-tls-certs\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: 
\"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.121128 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-config-data\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.121155 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-fernet-keys\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.121190 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-public-tls-certs\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.121232 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-combined-ca-bundle\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.121261 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-scripts\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.126759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-credential-keys\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.126768 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-internal-tls-certs\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.127782 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-scripts\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.128765 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-config-data\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.132118 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-public-tls-certs\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.133362 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-fernet-keys\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.137170 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-combined-ca-bundle\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.142886 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6p6p2\" (UniqueName: \"kubernetes.io/projected/d6093731-a529-4e5b-94bd-4948ab30cedc-kube-api-access-6p6p2\") pod \"keystone-7b9b5b699d-rh4fw\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.197142 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.666976 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4496d49-7b88-4c60-9fd5-fe0608f52b13","Type":"ContainerStarted","Data":"79c2fb1f54ff9a95cc4dcaa6cb5962ca09af991addf2ded2b30b24b2b7bbdfbe"} Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.671158 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"13084c10-bc6a-48a7-8624-a405f5d06e3d","Type":"ContainerStarted","Data":"e873d5eecdcfbd7db00f4e712168ecab4bbdea7c34c56fa735dedfc748d1b292"} Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.674609 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-546d9f9b4-87p6s" event={"ID":"9651251a-a0b2-4db8-bb82-b22a707bd7ab","Type":"ContainerStarted","Data":"5333a0de78b475fd78f332fa0f32083caa1395fc128350a6a203fa02b8019334"} Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.695946 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-7b9b5b699d-rh4fw"] Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.708327 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.708304069 podStartE2EDuration="9.708304069s" podCreationTimestamp="2025-11-26 15:49:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:10.698127774 +0000 UTC m=+1371.488844922" watchObservedRunningTime="2025-11-26 15:49:10.708304069 +0000 UTC m=+1371.499021217" Nov 26 15:49:10 crc kubenswrapper[5010]: W1126 15:49:10.719639 5010 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6093731_a529_4e5b_94bd_4948ab30cedc.slice/crio-ebfc8ca5daa68fe70d7f0bc633588997ab46efd2051f186e81f4453b1902d1e9 WatchSource:0}: Error finding container ebfc8ca5daa68fe70d7f0bc633588997ab46efd2051f186e81f4453b1902d1e9: Status 404 returned error can't find the container with id ebfc8ca5daa68fe70d7f0bc633588997ab46efd2051f186e81f4453b1902d1e9 Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.813124 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.936993 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slcwj\" (UniqueName: \"kubernetes.io/projected/673c6759-4cfb-4eb1-9e13-81b318c258ff-kube-api-access-slcwj\") pod \"673c6759-4cfb-4eb1-9e13-81b318c258ff\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.937330 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-swift-storage-0\") pod \"673c6759-4cfb-4eb1-9e13-81b318c258ff\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.937523 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-nb\") pod \"673c6759-4cfb-4eb1-9e13-81b318c258ff\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.937565 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-svc\") pod \"673c6759-4cfb-4eb1-9e13-81b318c258ff\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.937603 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-config\") pod \"673c6759-4cfb-4eb1-9e13-81b318c258ff\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.937638 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-sb\") pod \"673c6759-4cfb-4eb1-9e13-81b318c258ff\" (UID: \"673c6759-4cfb-4eb1-9e13-81b318c258ff\") " Nov 26 15:49:10 crc kubenswrapper[5010]: I1126 15:49:10.942675 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/673c6759-4cfb-4eb1-9e13-81b318c258ff-kube-api-access-slcwj" (OuterVolumeSpecName: "kube-api-access-slcwj") pod "673c6759-4cfb-4eb1-9e13-81b318c258ff" (UID: "673c6759-4cfb-4eb1-9e13-81b318c258ff"). InnerVolumeSpecName "kube-api-access-slcwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.011476 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-config" (OuterVolumeSpecName: "config") pod "673c6759-4cfb-4eb1-9e13-81b318c258ff" (UID: "673c6759-4cfb-4eb1-9e13-81b318c258ff"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.014793 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "673c6759-4cfb-4eb1-9e13-81b318c258ff" (UID: "673c6759-4cfb-4eb1-9e13-81b318c258ff"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.021139 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "673c6759-4cfb-4eb1-9e13-81b318c258ff" (UID: "673c6759-4cfb-4eb1-9e13-81b318c258ff"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.026270 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "673c6759-4cfb-4eb1-9e13-81b318c258ff" (UID: "673c6759-4cfb-4eb1-9e13-81b318c258ff"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.035444 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "673c6759-4cfb-4eb1-9e13-81b318c258ff" (UID: "673c6759-4cfb-4eb1-9e13-81b318c258ff"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.039555 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.039584 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.039593 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.039602 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.039613 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slcwj\" (UniqueName: \"kubernetes.io/projected/673c6759-4cfb-4eb1-9e13-81b318c258ff-kube-api-access-slcwj\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.039623 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/673c6759-4cfb-4eb1-9e13-81b318c258ff-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.711392 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerStarted","Data":"dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a"} Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.715541 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7b9b5b699d-rh4fw" event={"ID":"d6093731-a529-4e5b-94bd-4948ab30cedc","Type":"ContainerStarted","Data":"b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d"} Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.715576 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7b9b5b699d-rh4fw" event={"ID":"d6093731-a529-4e5b-94bd-4948ab30cedc","Type":"ContainerStarted","Data":"ebfc8ca5daa68fe70d7f0bc633588997ab46efd2051f186e81f4453b1902d1e9"} Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.715605 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.719540 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" event={"ID":"673c6759-4cfb-4eb1-9e13-81b318c258ff","Type":"ContainerDied","Data":"80e4a27749d3d0cb4a65270b07cef11790f5563e16b5ed4cbacde52328053fc6"} Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.719577 5010 scope.go:117] "RemoveContainer" containerID="d07adbf8132600c4f8b5f7ca9c691c8503c55f456d7270b7060a9c6d3dfafa76" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.719600 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-684f7c765c-ssxjs" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.727645 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-546d9f9b4-87p6s" event={"ID":"9651251a-a0b2-4db8-bb82-b22a707bd7ab","Type":"ContainerStarted","Data":"a86003926de01550b467b33cbf762fa3bc24eb67a06d8b70ca85b43666377672"} Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.728661 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.728700 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.744348 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-7b9b5b699d-rh4fw" podStartSLOduration=2.744320244 podStartE2EDuration="2.744320244s" podCreationTimestamp="2025-11-26 15:49:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:11.735602636 +0000 UTC m=+1372.526319784" watchObservedRunningTime="2025-11-26 15:49:11.744320244 +0000 UTC m=+1372.535037432" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.748999 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4496d49-7b88-4c60-9fd5-fe0608f52b13","Type":"ContainerStarted","Data":"f5c52d88f44b865b5096b50805d5e2f59cef8516541529d9fdf1ac840da5d9c1"} Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.760489 5010 scope.go:117] "RemoveContainer" containerID="79f30bb2ec9f2ad28f78ee8fcc8869a8c2530704a78f3d15b7d8d177801d0d7d" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.779801 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/placement-546d9f9b4-87p6s" podStartSLOduration=6.779777151 podStartE2EDuration="6.779777151s" podCreationTimestamp="2025-11-26 15:49:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:11.767031542 +0000 UTC m=+1372.557748690" watchObservedRunningTime="2025-11-26 15:49:11.779777151 +0000 UTC m=+1372.570494329" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.802601 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.802578422 podStartE2EDuration="10.802578422s" podCreationTimestamp="2025-11-26 15:49:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:11.798632953 +0000 UTC m=+1372.589350111" watchObservedRunningTime="2025-11-26 15:49:11.802578422 +0000 UTC m=+1372.593295580" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.832276 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-684f7c765c-ssxjs"] Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.842812 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-684f7c765c-ssxjs"] Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.905002 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" path="/var/lib/kubelet/pods/673c6759-4cfb-4eb1-9e13-81b318c258ff/volumes" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.965922 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.965976 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.974300 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.974355 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:11 crc kubenswrapper[5010]: I1126 15:49:11.997034 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.007433 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.016393 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.026360 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.752969 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x7zvb" event={"ID":"953ac15c-533c-4abd-ae8b-e5b8108da094","Type":"ContainerStarted","Data":"c9e20dc7a4328ee3612ea58196285a255ba3e587797e6fd20342a957928a6a6b"} Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.754440 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 
15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.754470 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.754484 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.754494 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:12 crc kubenswrapper[5010]: I1126 15:49:12.782936 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-x7zvb" podStartSLOduration=2.379014131 podStartE2EDuration="49.782919915s" podCreationTimestamp="2025-11-26 15:48:23 +0000 UTC" firstStartedPulling="2025-11-26 15:48:24.571938682 +0000 UTC m=+1325.362655830" lastFinishedPulling="2025-11-26 15:49:11.975844466 +0000 UTC m=+1372.766561614" observedRunningTime="2025-11-26 15:49:12.772141325 +0000 UTC m=+1373.562858483" watchObservedRunningTime="2025-11-26 15:49:12.782919915 +0000 UTC m=+1373.573637063" Nov 26 15:49:14 crc kubenswrapper[5010]: I1126 15:49:14.773589 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:49:14 crc kubenswrapper[5010]: I1126 15:49:14.975789 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:16 crc kubenswrapper[5010]: I1126 15:49:16.716534 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 15:49:23 crc kubenswrapper[5010]: E1126 15:49:23.691024 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" Nov 26 15:49:23 crc kubenswrapper[5010]: I1126 15:49:23.881748 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerStarted","Data":"bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272"} Nov 26 15:49:23 crc kubenswrapper[5010]: I1126 15:49:23.882197 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 15:49:23 crc kubenswrapper[5010]: I1126 15:49:23.882169 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="ceilometer-notification-agent" containerID="cri-o://b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5" gracePeriod=30 Nov 26 15:49:23 crc kubenswrapper[5010]: I1126 15:49:23.882341 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="sg-core" containerID="cri-o://dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a" gracePeriod=30 Nov 26 15:49:23 crc kubenswrapper[5010]: I1126 15:49:23.883205 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="proxy-httpd" containerID="cri-o://bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272" gracePeriod=30 Nov 26 15:49:23 crc kubenswrapper[5010]: 
I1126 15:49:23.904545 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hgfkn" event={"ID":"659b75fb-742f-4166-ab4b-e5015d05ccc1","Type":"ContainerStarted","Data":"e2648f8ab5e19664d085c18600c5012a94491dcb187ee60e1e3570ec0f86cc22"} Nov 26 15:49:24 crc kubenswrapper[5010]: I1126 15:49:24.906825 5010 generic.go:334] "Generic (PLEG): container finished" podID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerID="bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272" exitCode=0 Nov 26 15:49:24 crc kubenswrapper[5010]: I1126 15:49:24.907368 5010 generic.go:334] "Generic (PLEG): container finished" podID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerID="dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a" exitCode=2 Nov 26 15:49:24 crc kubenswrapper[5010]: I1126 15:49:24.906934 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerDied","Data":"bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272"} Nov 26 15:49:24 crc kubenswrapper[5010]: I1126 15:49:24.907443 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerDied","Data":"dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a"} Nov 26 15:49:24 crc kubenswrapper[5010]: I1126 15:49:24.939454 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-hgfkn" podStartSLOduration=3.263091675 podStartE2EDuration="1m1.939434547s" podCreationTimestamp="2025-11-26 15:48:23 +0000 UTC" firstStartedPulling="2025-11-26 15:48:24.572006823 +0000 UTC m=+1325.362723971" lastFinishedPulling="2025-11-26 15:49:23.248349695 +0000 UTC m=+1384.039066843" observedRunningTime="2025-11-26 15:49:24.931606961 +0000 UTC m=+1385.722324119" watchObservedRunningTime="2025-11-26 15:49:24.939434547 +0000 UTC m=+1385.730151695" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.772564 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853422 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-run-httpd\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853565 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-log-httpd\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853624 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-combined-ca-bundle\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853681 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-scripts\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853763 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rn4k\" (UniqueName: \"kubernetes.io/projected/dd3c6ef7-71bd-4191-b26a-b56464ec9772-kube-api-access-9rn4k\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853837 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-config-data\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.853869 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-sg-core-conf-yaml\") pod \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\" (UID: \"dd3c6ef7-71bd-4191-b26a-b56464ec9772\") " Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.854041 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.854635 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.855267 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). 
InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.864059 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd3c6ef7-71bd-4191-b26a-b56464ec9772-kube-api-access-9rn4k" (OuterVolumeSpecName: "kube-api-access-9rn4k") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). InnerVolumeSpecName "kube-api-access-9rn4k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.869034 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-scripts" (OuterVolumeSpecName: "scripts") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.888996 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.912693 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.925251 5010 generic.go:334] "Generic (PLEG): container finished" podID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerID="b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5" exitCode=0 Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.925354 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.947152 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-config-data" (OuterVolumeSpecName: "config-data") pod "dd3c6ef7-71bd-4191-b26a-b56464ec9772" (UID: "dd3c6ef7-71bd-4191-b26a-b56464ec9772"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.956521 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/dd3c6ef7-71bd-4191-b26a-b56464ec9772-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.956566 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.956586 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.956606 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rn4k\" (UniqueName: \"kubernetes.io/projected/dd3c6ef7-71bd-4191-b26a-b56464ec9772-kube-api-access-9rn4k\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.956620 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.956631 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/dd3c6ef7-71bd-4191-b26a-b56464ec9772-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.994845 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerDied","Data":"b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5"} Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.994891 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"dd3c6ef7-71bd-4191-b26a-b56464ec9772","Type":"ContainerDied","Data":"18aff558ab93214e4696af1b7e166dc365ba95b58a044735dcead1be169b709b"} Nov 26 15:49:25 crc kubenswrapper[5010]: I1126 15:49:25.994915 5010 scope.go:117] "RemoveContainer" containerID="bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.016995 5010 scope.go:117] "RemoveContainer" containerID="dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.034923 5010 scope.go:117] "RemoveContainer" containerID="b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.060179 5010 scope.go:117] "RemoveContainer" containerID="bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.060565 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272\": container with ID starting with bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272 not found: ID does not exist" containerID="bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.060612 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272"} err="failed to get container status \"bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272\": rpc error: code = NotFound desc = could not find container \"bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272\": container with ID starting with bacf6d8aec30f3d4194e8df5c3e3da6398d8db16c12ce6b8171790dfce075272 not found: ID does not exist" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.060640 5010 scope.go:117] "RemoveContainer" containerID="dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.061107 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a\": container with ID starting with dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a not found: ID does not exist" containerID="dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.061138 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a"} err="failed to get container status \"dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a\": rpc error: code = NotFound desc = could not find container \"dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a\": container with ID starting with dcec57e112d3a95a535eedf251592e56480b7173bf391563479f3daed771863a not found: ID does not exist" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.061162 5010 scope.go:117] "RemoveContainer" containerID="b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.061409 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5\": container with ID starting with b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5 not found: ID does not exist" containerID="b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.061433 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5"} err="failed to get container status \"b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5\": rpc error: code = NotFound desc = could not find container \"b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5\": container with ID starting with b52d41151d5e4140c430e4a87f72d45852bf5cab16946f41a3dd3148923ff3b5 not found: ID does not exist" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.292734 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.301826 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.334555 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.335066 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" 
containerName="init" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335080 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerName="init" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.335101 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="ceilometer-notification-agent" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335108 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="ceilometer-notification-agent" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.335124 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="sg-core" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335131 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="sg-core" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.335141 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="proxy-httpd" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335147 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="proxy-httpd" Nov 26 15:49:26 crc kubenswrapper[5010]: E1126 15:49:26.335167 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerName="dnsmasq-dns" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335172 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerName="dnsmasq-dns" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335358 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="673c6759-4cfb-4eb1-9e13-81b318c258ff" containerName="dnsmasq-dns" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335371 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="proxy-httpd" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335386 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="sg-core" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.335401 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" containerName="ceilometer-notification-agent" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.339415 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.346757 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.347130 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.352948 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.467180 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-config-data\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.467289 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwxps\" (UniqueName: \"kubernetes.io/projected/d6950462-52b0-49ad-b85d-d2372ff22aa8-kube-api-access-xwxps\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.467752 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.467812 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-log-httpd\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.467965 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-run-httpd\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.468039 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.468154 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-scripts\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570130 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 
15:49:26.570211 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-log-httpd\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570273 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-run-httpd\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570316 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570389 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-scripts\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570434 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-config-data\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570490 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwxps\" (UniqueName: \"kubernetes.io/projected/d6950462-52b0-49ad-b85d-d2372ff22aa8-kube-api-access-xwxps\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.570982 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-run-httpd\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.571867 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-log-httpd\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.576769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.577680 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-scripts\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.578546 5010 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.591649 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-config-data\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.596222 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwxps\" (UniqueName: \"kubernetes.io/projected/d6950462-52b0-49ad-b85d-d2372ff22aa8-kube-api-access-xwxps\") pod \"ceilometer-0\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " pod="openstack/ceilometer-0" Nov 26 15:49:26 crc kubenswrapper[5010]: I1126 15:49:26.671565 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:27 crc kubenswrapper[5010]: I1126 15:49:27.218325 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:27 crc kubenswrapper[5010]: W1126 15:49:27.226816 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6950462_52b0_49ad_b85d_d2372ff22aa8.slice/crio-bd5c6e42f45fef7f009660f5236398d5c09c05f8e84ac2f691045ba8742078af WatchSource:0}: Error finding container bd5c6e42f45fef7f009660f5236398d5c09c05f8e84ac2f691045ba8742078af: Status 404 returned error can't find the container with id bd5c6e42f45fef7f009660f5236398d5c09c05f8e84ac2f691045ba8742078af Nov 26 15:49:27 crc kubenswrapper[5010]: I1126 15:49:27.902800 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd3c6ef7-71bd-4191-b26a-b56464ec9772" path="/var/lib/kubelet/pods/dd3c6ef7-71bd-4191-b26a-b56464ec9772/volumes" Nov 26 15:49:27 crc kubenswrapper[5010]: I1126 15:49:27.947983 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerStarted","Data":"bd5c6e42f45fef7f009660f5236398d5c09c05f8e84ac2f691045ba8742078af"} Nov 26 15:49:28 crc kubenswrapper[5010]: I1126 15:49:28.959548 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerStarted","Data":"f6b75f622de1f2fa6dd717254a3425121af55e3b07e8946351d238dacc57d359"} Nov 26 15:49:29 crc kubenswrapper[5010]: I1126 15:49:29.971363 5010 generic.go:334] "Generic (PLEG): container finished" podID="953ac15c-533c-4abd-ae8b-e5b8108da094" containerID="c9e20dc7a4328ee3612ea58196285a255ba3e587797e6fd20342a957928a6a6b" exitCode=0 Nov 26 15:49:29 crc kubenswrapper[5010]: I1126 15:49:29.971427 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x7zvb" event={"ID":"953ac15c-533c-4abd-ae8b-e5b8108da094","Type":"ContainerDied","Data":"c9e20dc7a4328ee3612ea58196285a255ba3e587797e6fd20342a957928a6a6b"} Nov 26 15:49:30 crc kubenswrapper[5010]: I1126 15:49:30.986521 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerStarted","Data":"4058ddc142f1ac9afdb6aff8eaa26bcc79c4c88fec2a045d7fc221ba0ddeb1c6"} Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 
15:49:31.371901 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.457221 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mkscq\" (UniqueName: \"kubernetes.io/projected/953ac15c-533c-4abd-ae8b-e5b8108da094-kube-api-access-mkscq\") pod \"953ac15c-533c-4abd-ae8b-e5b8108da094\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.457296 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-db-sync-config-data\") pod \"953ac15c-533c-4abd-ae8b-e5b8108da094\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.457394 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-combined-ca-bundle\") pod \"953ac15c-533c-4abd-ae8b-e5b8108da094\" (UID: \"953ac15c-533c-4abd-ae8b-e5b8108da094\") " Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.463683 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "953ac15c-533c-4abd-ae8b-e5b8108da094" (UID: "953ac15c-533c-4abd-ae8b-e5b8108da094"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.463841 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/953ac15c-533c-4abd-ae8b-e5b8108da094-kube-api-access-mkscq" (OuterVolumeSpecName: "kube-api-access-mkscq") pod "953ac15c-533c-4abd-ae8b-e5b8108da094" (UID: "953ac15c-533c-4abd-ae8b-e5b8108da094"). InnerVolumeSpecName "kube-api-access-mkscq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.499704 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "953ac15c-533c-4abd-ae8b-e5b8108da094" (UID: "953ac15c-533c-4abd-ae8b-e5b8108da094"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.559450 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mkscq\" (UniqueName: \"kubernetes.io/projected/953ac15c-533c-4abd-ae8b-e5b8108da094-kube-api-access-mkscq\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.559497 5010 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.559506 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/953ac15c-533c-4abd-ae8b-e5b8108da094-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.996521 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-x7zvb" event={"ID":"953ac15c-533c-4abd-ae8b-e5b8108da094","Type":"ContainerDied","Data":"c1fc0255c888732ad147bb1b69fdaae5440def359eebc696d414bd1e411a8c3e"} Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.996599 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1fc0255c888732ad147bb1b69fdaae5440def359eebc696d414bd1e411a8c3e" Nov 26 15:49:31 crc kubenswrapper[5010]: I1126 15:49:31.996602 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-x7zvb" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.000556 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerStarted","Data":"9ceab6da40a2aaa46658284403150210370998d40158f78dfd12be664ad17edd"} Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.434253 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-cd69b7494-nmz2d"] Nov 26 15:49:32 crc kubenswrapper[5010]: E1126 15:49:32.434648 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="953ac15c-533c-4abd-ae8b-e5b8108da094" containerName="barbican-db-sync" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.434659 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="953ac15c-533c-4abd-ae8b-e5b8108da094" containerName="barbican-db-sync" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.434845 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="953ac15c-533c-4abd-ae8b-e5b8108da094" containerName="barbican-db-sync" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.435735 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.446250 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.446430 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-v9t4g" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.446580 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.468783 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-cd69b7494-nmz2d"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.499504 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7fbcbc6747-lkhxw"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.520671 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fbcbc6747-lkhxw"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.524826 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.528059 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585155 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9687c9f4-9131-4c43-a1f2-2faf3040e499-logs\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585203 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-combined-ca-bundle\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585222 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585285 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsxsl\" (UniqueName: \"kubernetes.io/projected/9687c9f4-9131-4c43-a1f2-2faf3040e499-kube-api-access-qsxsl\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585316 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-combined-ca-bundle\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: 
\"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585335 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-logs\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585362 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data-custom\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585392 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8z8v\" (UniqueName: \"kubernetes.io/projected/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-kube-api-access-w8z8v\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585415 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.585435 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data-custom\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.601842 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5578448889-mgm67"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.603794 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.627449 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5578448889-mgm67"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.689999 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44j7b\" (UniqueName: \"kubernetes.io/projected/f07f7850-8e0e-401a-9f35-3d0d0ac38711-kube-api-access-44j7b\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690057 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-sb\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690094 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsxsl\" (UniqueName: \"kubernetes.io/projected/9687c9f4-9131-4c43-a1f2-2faf3040e499-kube-api-access-qsxsl\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690136 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-combined-ca-bundle\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690177 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-logs\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690232 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data-custom\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690268 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8z8v\" (UniqueName: \"kubernetes.io/projected/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-kube-api-access-w8z8v\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690292 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-nb\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 
15:49:32.690315 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690340 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data-custom\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690386 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-swift-storage-0\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690420 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-config\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690449 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9687c9f4-9131-4c43-a1f2-2faf3040e499-logs\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690476 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-combined-ca-bundle\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690494 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.690527 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.694148 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-logs\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " 
pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.696810 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9687c9f4-9131-4c43-a1f2-2faf3040e499-logs\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.697004 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data-custom\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.699657 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-combined-ca-bundle\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.700575 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-combined-ca-bundle\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.701114 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.711426 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-94c89cc6d-zkrdd"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.713039 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.718412 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsxsl\" (UniqueName: \"kubernetes.io/projected/9687c9f4-9131-4c43-a1f2-2faf3040e499-kube-api-access-qsxsl\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.721362 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.732385 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8z8v\" (UniqueName: \"kubernetes.io/projected/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-kube-api-access-w8z8v\") pod \"barbican-worker-7fbcbc6747-lkhxw\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.734115 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.745137 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-94c89cc6d-zkrdd"] Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.748316 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data-custom\") pod \"barbican-keystone-listener-cd69b7494-nmz2d\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.771635 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.792533 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rpns\" (UniqueName: \"kubernetes.io/projected/f1e7fbb2-de0b-4911-b314-db803d9f9d77-kube-api-access-4rpns\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.792586 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.792653 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-swift-storage-0\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.792783 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-combined-ca-bundle\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.792887 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-config\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793030 5010 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793111 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data-custom\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793177 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44j7b\" (UniqueName: \"kubernetes.io/projected/f07f7850-8e0e-401a-9f35-3d0d0ac38711-kube-api-access-44j7b\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793241 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-sb\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793355 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e7fbb2-de0b-4911-b314-db803d9f9d77-logs\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-nb\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.793620 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-swift-storage-0\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.794339 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-sb\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.794462 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-nb\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.798390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-config\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.800816 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.822556 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44j7b\" (UniqueName: \"kubernetes.io/projected/f07f7850-8e0e-401a-9f35-3d0d0ac38711-kube-api-access-44j7b\") pod \"dnsmasq-dns-5578448889-mgm67\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.865186 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.895563 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rpns\" (UniqueName: \"kubernetes.io/projected/f1e7fbb2-de0b-4911-b314-db803d9f9d77-kube-api-access-4rpns\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.895634 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.895683 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-combined-ca-bundle\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.895829 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data-custom\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.895890 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e7fbb2-de0b-4911-b314-db803d9f9d77-logs\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.896551 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e7fbb2-de0b-4911-b314-db803d9f9d77-logs\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.900743 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.901774 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data-custom\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.903497 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-combined-ca-bundle\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.918289 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4rpns\" (UniqueName: \"kubernetes.io/projected/f1e7fbb2-de0b-4911-b314-db803d9f9d77-kube-api-access-4rpns\") pod \"barbican-api-94c89cc6d-zkrdd\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:32 crc kubenswrapper[5010]: I1126 15:49:32.942563 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.112958 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xspb2"] Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.116774 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.128766 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xspb2"] Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.168279 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.202220 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-catalog-content\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.202964 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-utilities\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.203100 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vc2bd\" (UniqueName: \"kubernetes.io/projected/cb11973b-5bc3-4f90-979d-921d29b03c0a-kube-api-access-vc2bd\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.305152 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-catalog-content\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.305290 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-utilities\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.305318 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vc2bd\" (UniqueName: \"kubernetes.io/projected/cb11973b-5bc3-4f90-979d-921d29b03c0a-kube-api-access-vc2bd\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.305909 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-catalog-content\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.306129 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-utilities\") pod \"redhat-operators-xspb2\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.331023 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vc2bd\" (UniqueName: \"kubernetes.io/projected/cb11973b-5bc3-4f90-979d-921d29b03c0a-kube-api-access-vc2bd\") pod \"redhat-operators-xspb2\" (UID: 
\"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:33 crc kubenswrapper[5010]: I1126 15:49:33.442929 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:34 crc kubenswrapper[5010]: I1126 15:49:34.177639 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-cd69b7494-nmz2d"] Nov 26 15:49:34 crc kubenswrapper[5010]: I1126 15:49:34.191246 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-94c89cc6d-zkrdd"] Nov 26 15:49:34 crc kubenswrapper[5010]: I1126 15:49:34.200417 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5578448889-mgm67"] Nov 26 15:49:34 crc kubenswrapper[5010]: W1126 15:49:34.216857 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1e7fbb2_de0b_4911_b314_db803d9f9d77.slice/crio-7f07518e931a80021a6f54275ea0216e4acacab629c6372ef99aa37b1becc506 WatchSource:0}: Error finding container 7f07518e931a80021a6f54275ea0216e4acacab629c6372ef99aa37b1becc506: Status 404 returned error can't find the container with id 7f07518e931a80021a6f54275ea0216e4acacab629c6372ef99aa37b1becc506 Nov 26 15:49:34 crc kubenswrapper[5010]: W1126 15:49:34.219275 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf07f7850_8e0e_401a_9f35_3d0d0ac38711.slice/crio-a7a7d619cb2feab132c5b330dce62ea8ac601af60f3e8922791dee79db61adde WatchSource:0}: Error finding container a7a7d619cb2feab132c5b330dce62ea8ac601af60f3e8922791dee79db61adde: Status 404 returned error can't find the container with id a7a7d619cb2feab132c5b330dce62ea8ac601af60f3e8922791dee79db61adde Nov 26 15:49:34 crc kubenswrapper[5010]: I1126 15:49:34.351189 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fbcbc6747-lkhxw"] Nov 26 15:49:34 crc kubenswrapper[5010]: I1126 15:49:34.452609 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xspb2"] Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.051034 5010 generic.go:334] "Generic (PLEG): container finished" podID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerID="2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec" exitCode=0 Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.051468 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5578448889-mgm67" event={"ID":"f07f7850-8e0e-401a-9f35-3d0d0ac38711","Type":"ContainerDied","Data":"2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.051500 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5578448889-mgm67" event={"ID":"f07f7850-8e0e-401a-9f35-3d0d0ac38711","Type":"ContainerStarted","Data":"a7a7d619cb2feab132c5b330dce62ea8ac601af60f3e8922791dee79db61adde"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.087238 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerStarted","Data":"13b66a1f285ddcb948c8ca304aaf0218edb08294e8f905e6f8bdc721290217c4"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.087849 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" 
Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.099145 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" event={"ID":"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a","Type":"ContainerStarted","Data":"0d2fe0a9b060e3461b31c86db2c4b82939d2c7fdc590eb737fa7c3bacd3ec9ee"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.113201 5010 generic.go:334] "Generic (PLEG): container finished" podID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerID="fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d" exitCode=0 Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.113345 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerDied","Data":"fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.113387 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerStarted","Data":"3d89efe2146dc9c76f1094f848e25c1ebafde81281415c39394a3b16bb05b10b"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.153168 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94c89cc6d-zkrdd" event={"ID":"f1e7fbb2-de0b-4911-b314-db803d9f9d77","Type":"ContainerStarted","Data":"de289b713a8647e5a6edd6ea75983aa642bc11037f6380f5ab8fb186a9aefc82"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.153237 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94c89cc6d-zkrdd" event={"ID":"f1e7fbb2-de0b-4911-b314-db803d9f9d77","Type":"ContainerStarted","Data":"cdde83ebdc722c4c90f4ab58cc6868315adb9fed02e7f54d20177203d3502628"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.153248 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94c89cc6d-zkrdd" event={"ID":"f1e7fbb2-de0b-4911-b314-db803d9f9d77","Type":"ContainerStarted","Data":"7f07518e931a80021a6f54275ea0216e4acacab629c6372ef99aa37b1becc506"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.154247 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.154284 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.156207 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" event={"ID":"9687c9f4-9131-4c43-a1f2-2faf3040e499","Type":"ContainerStarted","Data":"86ed3de104751ba238c1775bc15e2c8bbb8db23a69c23d9e4ff0323e7d429630"} Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.232550 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.687592646 podStartE2EDuration="9.232526876s" podCreationTimestamp="2025-11-26 15:49:26 +0000 UTC" firstStartedPulling="2025-11-26 15:49:27.230386895 +0000 UTC m=+1388.021104053" lastFinishedPulling="2025-11-26 15:49:33.775321135 +0000 UTC m=+1394.566038283" observedRunningTime="2025-11-26 15:49:35.162883404 +0000 UTC m=+1395.953600552" watchObservedRunningTime="2025-11-26 15:49:35.232526876 +0000 UTC m=+1396.023244024" Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.297127 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-94c89cc6d-zkrdd" podStartSLOduration=3.297103082 podStartE2EDuration="3.297103082s" podCreationTimestamp="2025-11-26 15:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:35.289318587 +0000 UTC m=+1396.080035745" watchObservedRunningTime="2025-11-26 15:49:35.297103082 +0000 UTC m=+1396.087820230" Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.779997 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 15:49:35 crc kubenswrapper[5010]: I1126 15:49:35.803892 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.196492 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5578448889-mgm67" event={"ID":"f07f7850-8e0e-401a-9f35-3d0d0ac38711","Type":"ContainerStarted","Data":"cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e"} Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.226521 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5578448889-mgm67" podStartSLOduration=4.226501811 podStartE2EDuration="4.226501811s" podCreationTimestamp="2025-11-26 15:49:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:36.220407458 +0000 UTC m=+1397.011124616" watchObservedRunningTime="2025-11-26 15:49:36.226501811 +0000 UTC m=+1397.017218969" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.503463 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-587c687588-ztm89"] Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.505010 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.507951 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.509031 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.570942 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-587c687588-ztm89"] Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584092 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e65ad49-eec3-460d-aa80-0880c5e2e86b-logs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584148 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-internal-tls-certs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584180 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-combined-ca-bundle\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584215 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-public-tls-certs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584247 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwr8w\" (UniqueName: \"kubernetes.io/projected/0e65ad49-eec3-460d-aa80-0880c5e2e86b-kube-api-access-hwr8w\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584275 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.584352 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data-custom\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686641 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e65ad49-eec3-460d-aa80-0880c5e2e86b-logs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686703 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-internal-tls-certs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686747 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-combined-ca-bundle\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686780 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-public-tls-certs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686808 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwr8w\" (UniqueName: \"kubernetes.io/projected/0e65ad49-eec3-460d-aa80-0880c5e2e86b-kube-api-access-hwr8w\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686852 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.686937 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data-custom\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.688081 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e65ad49-eec3-460d-aa80-0880c5e2e86b-logs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.694828 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-combined-ca-bundle\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.695583 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-public-tls-certs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.696124 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data-custom\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.698473 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.703269 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-internal-tls-certs\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.711618 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwr8w\" (UniqueName: \"kubernetes.io/projected/0e65ad49-eec3-460d-aa80-0880c5e2e86b-kube-api-access-hwr8w\") pod \"barbican-api-587c687588-ztm89\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:36 crc kubenswrapper[5010]: I1126 15:49:36.835308 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:37 crc kubenswrapper[5010]: I1126 15:49:37.211450 5010 generic.go:334] "Generic (PLEG): container finished" podID="659b75fb-742f-4166-ab4b-e5015d05ccc1" containerID="e2648f8ab5e19664d085c18600c5012a94491dcb187ee60e1e3570ec0f86cc22" exitCode=0 Nov 26 15:49:37 crc kubenswrapper[5010]: I1126 15:49:37.211573 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hgfkn" event={"ID":"659b75fb-742f-4166-ab4b-e5015d05ccc1","Type":"ContainerDied","Data":"e2648f8ab5e19664d085c18600c5012a94491dcb187ee60e1e3570ec0f86cc22"} Nov 26 15:49:37 crc kubenswrapper[5010]: I1126 15:49:37.212478 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:37 crc kubenswrapper[5010]: I1126 15:49:37.339419 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:37 crc kubenswrapper[5010]: I1126 15:49:37.342963 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:49:37 crc kubenswrapper[5010]: I1126 15:49:37.914949 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-587c687588-ztm89"] Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.235927 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" event={"ID":"9687c9f4-9131-4c43-a1f2-2faf3040e499","Type":"ContainerStarted","Data":"1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904"} Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.235973 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" event={"ID":"9687c9f4-9131-4c43-a1f2-2faf3040e499","Type":"ContainerStarted","Data":"ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e"} Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.237087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-587c687588-ztm89" event={"ID":"0e65ad49-eec3-460d-aa80-0880c5e2e86b","Type":"ContainerStarted","Data":"72d83b24cb89b212c1c88444156a9749ee449365d3a65080c7e3957737694b2b"} Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.247376 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" event={"ID":"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a","Type":"ContainerStarted","Data":"256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e"} Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.247433 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" event={"ID":"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a","Type":"ContainerStarted","Data":"91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e"} Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.261840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerStarted","Data":"2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113"} Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.287629 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" podStartSLOduration=3.196001833 podStartE2EDuration="6.287606679s" 
podCreationTimestamp="2025-11-26 15:49:32 +0000 UTC" firstStartedPulling="2025-11-26 15:49:34.204979093 +0000 UTC m=+1394.995696241" lastFinishedPulling="2025-11-26 15:49:37.296583939 +0000 UTC m=+1398.087301087" observedRunningTime="2025-11-26 15:49:38.280355648 +0000 UTC m=+1399.071072796" watchObservedRunningTime="2025-11-26 15:49:38.287606679 +0000 UTC m=+1399.078323827" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.357986 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" podStartSLOduration=3.467830272 podStartE2EDuration="6.357964498s" podCreationTimestamp="2025-11-26 15:49:32 +0000 UTC" firstStartedPulling="2025-11-26 15:49:34.432678079 +0000 UTC m=+1395.223395227" lastFinishedPulling="2025-11-26 15:49:37.322812305 +0000 UTC m=+1398.113529453" observedRunningTime="2025-11-26 15:49:38.35401695 +0000 UTC m=+1399.144734108" watchObservedRunningTime="2025-11-26 15:49:38.357964498 +0000 UTC m=+1399.148681646" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.810561 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.938354 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-scripts\") pod \"659b75fb-742f-4166-ab4b-e5015d05ccc1\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.938732 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/659b75fb-742f-4166-ab4b-e5015d05ccc1-etc-machine-id\") pod \"659b75fb-742f-4166-ab4b-e5015d05ccc1\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.938775 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-config-data\") pod \"659b75fb-742f-4166-ab4b-e5015d05ccc1\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.938895 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-db-sync-config-data\") pod \"659b75fb-742f-4166-ab4b-e5015d05ccc1\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.938918 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/659b75fb-742f-4166-ab4b-e5015d05ccc1-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "659b75fb-742f-4166-ab4b-e5015d05ccc1" (UID: "659b75fb-742f-4166-ab4b-e5015d05ccc1"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.939042 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-combined-ca-bundle\") pod \"659b75fb-742f-4166-ab4b-e5015d05ccc1\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.939075 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hjzkp\" (UniqueName: \"kubernetes.io/projected/659b75fb-742f-4166-ab4b-e5015d05ccc1-kube-api-access-hjzkp\") pod \"659b75fb-742f-4166-ab4b-e5015d05ccc1\" (UID: \"659b75fb-742f-4166-ab4b-e5015d05ccc1\") " Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.939507 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/659b75fb-742f-4166-ab4b-e5015d05ccc1-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.945685 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "659b75fb-742f-4166-ab4b-e5015d05ccc1" (UID: "659b75fb-742f-4166-ab4b-e5015d05ccc1"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.946295 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-scripts" (OuterVolumeSpecName: "scripts") pod "659b75fb-742f-4166-ab4b-e5015d05ccc1" (UID: "659b75fb-742f-4166-ab4b-e5015d05ccc1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.946389 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/659b75fb-742f-4166-ab4b-e5015d05ccc1-kube-api-access-hjzkp" (OuterVolumeSpecName: "kube-api-access-hjzkp") pod "659b75fb-742f-4166-ab4b-e5015d05ccc1" (UID: "659b75fb-742f-4166-ab4b-e5015d05ccc1"). InnerVolumeSpecName "kube-api-access-hjzkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:38 crc kubenswrapper[5010]: I1126 15:49:38.985831 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "659b75fb-742f-4166-ab4b-e5015d05ccc1" (UID: "659b75fb-742f-4166-ab4b-e5015d05ccc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.004637 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-config-data" (OuterVolumeSpecName: "config-data") pod "659b75fb-742f-4166-ab4b-e5015d05ccc1" (UID: "659b75fb-742f-4166-ab4b-e5015d05ccc1"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.041325 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.041364 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.041378 5010 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.041406 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/659b75fb-742f-4166-ab4b-e5015d05ccc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.041421 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hjzkp\" (UniqueName: \"kubernetes.io/projected/659b75fb-742f-4166-ab4b-e5015d05ccc1-kube-api-access-hjzkp\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.283186 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-587c687588-ztm89" event={"ID":"0e65ad49-eec3-460d-aa80-0880c5e2e86b","Type":"ContainerStarted","Data":"118a80403c8effe28594f56bbbae9975efb6bb4ecc9f75c9df702170fd76f085"} Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.283272 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-587c687588-ztm89" event={"ID":"0e65ad49-eec3-460d-aa80-0880c5e2e86b","Type":"ContainerStarted","Data":"9defbd037a4a2f05eca15526ffb9c48bad32cd70369ffd0dc805ef3172852686"} Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.283297 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.283336 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.297816 5010 generic.go:334] "Generic (PLEG): container finished" podID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerID="2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113" exitCode=0 Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.297878 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerDied","Data":"2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113"} Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.302217 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-hgfkn" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.302261 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-hgfkn" event={"ID":"659b75fb-742f-4166-ab4b-e5015d05ccc1","Type":"ContainerDied","Data":"13d8f970af73f05516922d3baa064778e42699f9b19f0d569110e60bcb8f9139"} Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.302292 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13d8f970af73f05516922d3baa064778e42699f9b19f0d569110e60bcb8f9139" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.321424 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-587c687588-ztm89" podStartSLOduration=3.321403769 podStartE2EDuration="3.321403769s" podCreationTimestamp="2025-11-26 15:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:39.317272656 +0000 UTC m=+1400.107989814" watchObservedRunningTime="2025-11-26 15:49:39.321403769 +0000 UTC m=+1400.112120917" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.521561 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:39 crc kubenswrapper[5010]: E1126 15:49:39.522061 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="659b75fb-742f-4166-ab4b-e5015d05ccc1" containerName="cinder-db-sync" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.522078 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="659b75fb-742f-4166-ab4b-e5015d05ccc1" containerName="cinder-db-sync" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.522334 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="659b75fb-742f-4166-ab4b-e5015d05ccc1" containerName="cinder-db-sync" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.523718 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.526130 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.527273 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nq4j6" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.527448 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.527673 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.570189 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.653932 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcs5j\" (UniqueName: \"kubernetes.io/projected/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-kube-api-access-vcs5j\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.654018 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-scripts\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.654063 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.654109 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.654142 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.654193 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.686982 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5578448889-mgm67"] Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.687674 5010 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-5578448889-mgm67" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerName="dnsmasq-dns" containerID="cri-o://cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e" gracePeriod=10 Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.719467 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f468f79cc-w6vst"] Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.722189 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755694 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-nb\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcs5j\" (UniqueName: \"kubernetes.io/projected/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-kube-api-access-vcs5j\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755834 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqkzz\" (UniqueName: \"kubernetes.io/projected/56f52142-3dc2-42d5-bfdf-4453c630b257-kube-api-access-bqkzz\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755872 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-scripts\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755900 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-swift-storage-0\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755918 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755948 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755963 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-svc\") pod 
\"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.755990 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.756023 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-sb\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.756042 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.756066 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-config\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.758279 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.761286 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f468f79cc-w6vst"] Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.765802 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-scripts\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.767426 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.767988 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.773044 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data-custom\") pod \"cinder-scheduler-0\" (UID: 
\"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.777528 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcs5j\" (UniqueName: \"kubernetes.io/projected/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-kube-api-access-vcs5j\") pod \"cinder-scheduler-0\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.830780 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.832773 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.837910 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.845425 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.857203 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858531 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-nb\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858561 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4122768-1341-48d8-8fb1-c0a60a4d9b43-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqkzz\" (UniqueName: \"kubernetes.io/projected/56f52142-3dc2-42d5-bfdf-4453c630b257-kube-api-access-bqkzz\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858641 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-swift-storage-0\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858673 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-svc\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " 
pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858716 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4122768-1341-48d8-8fb1-c0a60a4d9b43-logs\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858732 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858751 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-sb\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858773 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-config\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858793 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858826 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-scripts\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.858851 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5flf\" (UniqueName: \"kubernetes.io/projected/f4122768-1341-48d8-8fb1-c0a60a4d9b43-kube-api-access-g5flf\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.860355 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-swift-storage-0\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.861160 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-sb\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.861293 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-svc\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.861809 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-nb\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.862624 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-config\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.883184 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqkzz\" (UniqueName: \"kubernetes.io/projected/56f52142-3dc2-42d5-bfdf-4453c630b257-kube-api-access-bqkzz\") pod \"dnsmasq-dns-5f468f79cc-w6vst\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966171 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4122768-1341-48d8-8fb1-c0a60a4d9b43-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966223 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966302 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4122768-1341-48d8-8fb1-c0a60a4d9b43-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966594 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4122768-1341-48d8-8fb1-c0a60a4d9b43-logs\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966619 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966732 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc 
kubenswrapper[5010]: I1126 15:49:39.966907 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-scripts\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.966965 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5flf\" (UniqueName: \"kubernetes.io/projected/f4122768-1341-48d8-8fb1-c0a60a4d9b43-kube-api-access-g5flf\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.968460 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4122768-1341-48d8-8fb1-c0a60a4d9b43-logs\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.971258 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-scripts\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.973268 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.989235 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5flf\" (UniqueName: \"kubernetes.io/projected/f4122768-1341-48d8-8fb1-c0a60a4d9b43-kube-api-access-g5flf\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.993003 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data-custom\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:39 crc kubenswrapper[5010]: I1126 15:49:39.993357 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " pod="openstack/cinder-api-0" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.061066 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.075054 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.223568 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.276046 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44j7b\" (UniqueName: \"kubernetes.io/projected/f07f7850-8e0e-401a-9f35-3d0d0ac38711-kube-api-access-44j7b\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.276098 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-config\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.276263 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.276355 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-nb\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.276436 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-swift-storage-0\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.276491 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-sb\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.307230 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f07f7850-8e0e-401a-9f35-3d0d0ac38711-kube-api-access-44j7b" (OuterVolumeSpecName: "kube-api-access-44j7b") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "kube-api-access-44j7b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.338520 5010 generic.go:334] "Generic (PLEG): container finished" podID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerID="cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e" exitCode=0 Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.339833 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5578448889-mgm67" event={"ID":"f07f7850-8e0e-401a-9f35-3d0d0ac38711","Type":"ContainerDied","Data":"cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e"} Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.339911 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5578448889-mgm67" event={"ID":"f07f7850-8e0e-401a-9f35-3d0d0ac38711","Type":"ContainerDied","Data":"a7a7d619cb2feab132c5b330dce62ea8ac601af60f3e8922791dee79db61adde"} Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.339943 5010 scope.go:117] "RemoveContainer" containerID="cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.339962 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5578448889-mgm67" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.350819 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-config" (OuterVolumeSpecName: "config") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.380105 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.383176 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc\") pod \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\" (UID: \"f07f7850-8e0e-401a-9f35-3d0d0ac38711\") " Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.384276 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44j7b\" (UniqueName: \"kubernetes.io/projected/f07f7850-8e0e-401a-9f35-3d0d0ac38711-kube-api-access-44j7b\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.384303 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:40 crc kubenswrapper[5010]: W1126 15:49:40.385184 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/f07f7850-8e0e-401a-9f35-3d0d0ac38711/volumes/kubernetes.io~configmap/dns-svc Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.385203 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.391047 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.477157 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.478449 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f07f7850-8e0e-401a-9f35-3d0d0ac38711" (UID: "f07f7850-8e0e-401a-9f35-3d0d0ac38711"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.502524 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.502560 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.502576 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.502586 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f07f7850-8e0e-401a-9f35-3d0d0ac38711-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.512574 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.626860 5010 scope.go:117] "RemoveContainer" containerID="2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.677141 5010 scope.go:117] "RemoveContainer" containerID="cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e" Nov 26 15:49:40 crc kubenswrapper[5010]: E1126 15:49:40.677634 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e\": container with ID starting with cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e not found: ID does not exist" containerID="cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.677662 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e"} err="failed to get container status \"cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e\": rpc error: code = NotFound desc = could not find container \"cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e\": container with ID starting with cc15b03576b77566a7ea966d62b2e7544c967a468f2a215d4ad88c3ab18e870e not found: ID does not exist" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.677685 5010 scope.go:117] "RemoveContainer" containerID="2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec" Nov 26 15:49:40 crc kubenswrapper[5010]: E1126 15:49:40.679791 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec\": container with ID starting with 2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec not found: ID does not exist" containerID="2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.679816 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec"} err="failed to get container status \"2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec\": rpc error: code = NotFound desc = could not find container \"2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec\": container with ID starting with 2db751fc06ed93341e265f7fd80733a9389313671ab4c763de1c070e1e1d73ec not found: ID does not exist" Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.790772 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5578448889-mgm67"] Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.829239 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5578448889-mgm67"] Nov 26 15:49:40 crc kubenswrapper[5010]: I1126 15:49:40.895880 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:40 crc kubenswrapper[5010]: W1126 15:49:40.904648 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4122768_1341_48d8_8fb1_c0a60a4d9b43.slice/crio-199e1e90e6f44af176e2d1380db251536321333e13d0c39844c37f7bd029742e WatchSource:0}: Error finding container 199e1e90e6f44af176e2d1380db251536321333e13d0c39844c37f7bd029742e: Status 404 returned error can't find the container with id 199e1e90e6f44af176e2d1380db251536321333e13d0c39844c37f7bd029742e Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.004513 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f468f79cc-w6vst"] Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.354437 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerStarted","Data":"38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20"} Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.357095 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4122768-1341-48d8-8fb1-c0a60a4d9b43","Type":"ContainerStarted","Data":"199e1e90e6f44af176e2d1380db251536321333e13d0c39844c37f7bd029742e"} Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.359429 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18c7269a-4239-4da6-8f67-fbcbdc2cf38d","Type":"ContainerStarted","Data":"74773908643a09997c98822011ce3b12945d31ccad422f694ac07acb476a6018"} Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.363320 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" event={"ID":"56f52142-3dc2-42d5-bfdf-4453c630b257","Type":"ContainerStarted","Data":"0695a58faf38f30d6cf5da98255ca6e9bc509759db656111e3a24a42de4c836b"} Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.374281 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xspb2" podStartSLOduration=3.104747515 podStartE2EDuration="8.374265211s" podCreationTimestamp="2025-11-26 15:49:33 +0000 UTC" firstStartedPulling="2025-11-26 15:49:35.128283819 +0000 UTC m=+1395.919000967" lastFinishedPulling="2025-11-26 15:49:40.397801505 +0000 UTC m=+1401.188518663" observedRunningTime="2025-11-26 15:49:41.374251651 +0000 UTC m=+1402.164968829" watchObservedRunningTime="2025-11-26 15:49:41.374265211 +0000 UTC m=+1402.164982359" Nov 26 15:49:41 crc 
kubenswrapper[5010]: I1126 15:49:41.735758 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:41 crc kubenswrapper[5010]: I1126 15:49:41.901421 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" path="/var/lib/kubelet/pods/f07f7850-8e0e-401a-9f35-3d0d0ac38711/volumes" Nov 26 15:49:42 crc kubenswrapper[5010]: I1126 15:49:42.383272 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:49:42 crc kubenswrapper[5010]: I1126 15:49:42.390803 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4122768-1341-48d8-8fb1-c0a60a4d9b43","Type":"ContainerStarted","Data":"8025ddf8c79e0dd5a0db7c54245911bdd8a60aed815e3b219e5523ed1738b8a8"} Nov 26 15:49:42 crc kubenswrapper[5010]: I1126 15:49:42.392084 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerID="52f3aec60687b126b0f26081e4fbe4521656a4359a4c9d93ab37d66de9b8d2ea" exitCode=0 Nov 26 15:49:42 crc kubenswrapper[5010]: I1126 15:49:42.392292 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" event={"ID":"56f52142-3dc2-42d5-bfdf-4453c630b257","Type":"ContainerDied","Data":"52f3aec60687b126b0f26081e4fbe4521656a4359a4c9d93ab37d66de9b8d2ea"} Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.432438 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" event={"ID":"56f52142-3dc2-42d5-bfdf-4453c630b257","Type":"ContainerStarted","Data":"d787974dd62c14ff6c68644045b18c71e3c386b89e87f671ce2be54c1ac3c7a2"} Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.433116 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.443780 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.443830 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.448337 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api-log" containerID="cri-o://8025ddf8c79e0dd5a0db7c54245911bdd8a60aed815e3b219e5523ed1738b8a8" gracePeriod=30 Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.448699 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4122768-1341-48d8-8fb1-c0a60a4d9b43","Type":"ContainerStarted","Data":"e2929c9e91e989dd1f3769abbff5d6dc7d6aee5e461966224d0f7ffc20c5f0a4"} Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.448901 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.448952 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api" containerID="cri-o://e2929c9e91e989dd1f3769abbff5d6dc7d6aee5e461966224d0f7ffc20c5f0a4" gracePeriod=30 Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.469308 5010 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" podStartSLOduration=4.469283898 podStartE2EDuration="4.469283898s" podCreationTimestamp="2025-11-26 15:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:43.458009405 +0000 UTC m=+1404.248726563" watchObservedRunningTime="2025-11-26 15:49:43.469283898 +0000 UTC m=+1404.260001046" Nov 26 15:49:43 crc kubenswrapper[5010]: I1126 15:49:43.487678 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.487657707 podStartE2EDuration="4.487657707s" podCreationTimestamp="2025-11-26 15:49:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:43.479482213 +0000 UTC m=+1404.270199371" watchObservedRunningTime="2025-11-26 15:49:43.487657707 +0000 UTC m=+1404.278374855" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.255049 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-94c89cc6d-zkrdd" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.255071 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-94c89cc6d-zkrdd" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.463558 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerID="e2929c9e91e989dd1f3769abbff5d6dc7d6aee5e461966224d0f7ffc20c5f0a4" exitCode=0 Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.463621 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerID="8025ddf8c79e0dd5a0db7c54245911bdd8a60aed815e3b219e5523ed1738b8a8" exitCode=143 Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.463647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4122768-1341-48d8-8fb1-c0a60a4d9b43","Type":"ContainerDied","Data":"e2929c9e91e989dd1f3769abbff5d6dc7d6aee5e461966224d0f7ffc20c5f0a4"} Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.463695 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4122768-1341-48d8-8fb1-c0a60a4d9b43","Type":"ContainerDied","Data":"8025ddf8c79e0dd5a0db7c54245911bdd8a60aed815e3b219e5523ed1738b8a8"} Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.467512 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18c7269a-4239-4da6-8f67-fbcbdc2cf38d","Type":"ContainerStarted","Data":"1b52c80569c12e1eea14351e7f52a718a2357ad66408a44bdab6778c4a6edc70"} Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.542275 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xspb2" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="registry-server" probeResult="failure" output=< Nov 26 15:49:44 crc kubenswrapper[5010]: timeout: failed to connect 
service ":50051" within 1s Nov 26 15:49:44 crc kubenswrapper[5010]: > Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.755150 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.846857 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g5flf\" (UniqueName: \"kubernetes.io/projected/f4122768-1341-48d8-8fb1-c0a60a4d9b43-kube-api-access-g5flf\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.846962 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data-custom\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.847070 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4122768-1341-48d8-8fb1-c0a60a4d9b43-etc-machine-id\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.847108 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-combined-ca-bundle\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.847317 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.847371 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-scripts\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.847432 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4122768-1341-48d8-8fb1-c0a60a4d9b43-logs\") pod \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\" (UID: \"f4122768-1341-48d8-8fb1-c0a60a4d9b43\") " Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.848679 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4122768-1341-48d8-8fb1-c0a60a4d9b43-logs" (OuterVolumeSpecName: "logs") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.850462 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4122768-1341-48d8-8fb1-c0a60a4d9b43-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.857597 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4122768-1341-48d8-8fb1-c0a60a4d9b43-kube-api-access-g5flf" (OuterVolumeSpecName: "kube-api-access-g5flf") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "kube-api-access-g5flf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.861836 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.873726 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-scripts" (OuterVolumeSpecName: "scripts") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.928832 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.951060 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.952481 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f4122768-1341-48d8-8fb1-c0a60a4d9b43-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.952613 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g5flf\" (UniqueName: \"kubernetes.io/projected/f4122768-1341-48d8-8fb1-c0a60a4d9b43-kube-api-access-g5flf\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.952686 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.952778 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f4122768-1341-48d8-8fb1-c0a60a4d9b43-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:44 crc kubenswrapper[5010]: I1126 15:49:44.952846 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.013869 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded 
for volume "kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data" (OuterVolumeSpecName: "config-data") pod "f4122768-1341-48d8-8fb1-c0a60a4d9b43" (UID: "f4122768-1341-48d8-8fb1-c0a60a4d9b43"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.037389 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.054908 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f4122768-1341-48d8-8fb1-c0a60a4d9b43-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.483128 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.483371 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f4122768-1341-48d8-8fb1-c0a60a4d9b43","Type":"ContainerDied","Data":"199e1e90e6f44af176e2d1380db251536321333e13d0c39844c37f7bd029742e"} Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.483472 5010 scope.go:117] "RemoveContainer" containerID="e2929c9e91e989dd1f3769abbff5d6dc7d6aee5e461966224d0f7ffc20c5f0a4" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.534427 5010 scope.go:117] "RemoveContainer" containerID="8025ddf8c79e0dd5a0db7c54245911bdd8a60aed815e3b219e5523ed1738b8a8" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.541824 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.557568 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.571878 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:45 crc kubenswrapper[5010]: E1126 15:49:45.572318 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerName="init" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572334 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerName="init" Nov 26 15:49:45 crc kubenswrapper[5010]: E1126 15:49:45.572350 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerName="dnsmasq-dns" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572361 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerName="dnsmasq-dns" Nov 26 15:49:45 crc kubenswrapper[5010]: E1126 15:49:45.572371 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572377 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api" Nov 26 15:49:45 crc kubenswrapper[5010]: E1126 15:49:45.572394 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api-log" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572401 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api-log" 
Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572571 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572590 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f07f7850-8e0e-401a-9f35-3d0d0ac38711" containerName="dnsmasq-dns" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.572604 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" containerName="cinder-api-log" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.579518 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.582880 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.586400 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.586769 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.590199 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.647397 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.653153 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.657003 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.663291 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.663484 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-zdqmk" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.663555 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.665960 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mvqh\" (UniqueName: \"kubernetes.io/projected/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-kube-api-access-8mvqh\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666075 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666099 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666131 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-scripts\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666145 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666181 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-logs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666243 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666265 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " 
pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.666297 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data-custom\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.671326 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.767970 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data-custom\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768099 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mvqh\" (UniqueName: \"kubernetes.io/projected/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-kube-api-access-8mvqh\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768136 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-combined-ca-bundle\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768169 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k59x\" (UniqueName: \"kubernetes.io/projected/08acaf58-5c2f-4fb4-8863-846c28f8d016-kube-api-access-2k59x\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768213 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config-secret\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768316 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768341 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768388 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 
15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768418 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-scripts\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768438 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-logs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768465 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768494 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768521 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.768625 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-etc-machine-id\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.783095 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data-custom\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.783352 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-scripts\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.787054 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-public-tls-certs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.789396 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: 
I1126 15:49:45.793449 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.796747 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.802000 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-logs\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.807481 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mvqh\" (UniqueName: \"kubernetes.io/projected/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-kube-api-access-8mvqh\") pod \"cinder-api-0\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.870743 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-combined-ca-bundle\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.871087 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k59x\" (UniqueName: \"kubernetes.io/projected/08acaf58-5c2f-4fb4-8863-846c28f8d016-kube-api-access-2k59x\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.871231 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config-secret\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.871425 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.873068 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.878620 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config-secret\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " 
pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.879781 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-combined-ca-bundle\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.902723 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.903385 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k59x\" (UniqueName: \"kubernetes.io/projected/08acaf58-5c2f-4fb4-8863-846c28f8d016-kube-api-access-2k59x\") pod \"openstackclient\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " pod="openstack/openstackclient" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.942513 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4122768-1341-48d8-8fb1-c0a60a4d9b43" path="/var/lib/kubelet/pods/f4122768-1341-48d8-8fb1-c0a60a4d9b43/volumes" Nov 26 15:49:45 crc kubenswrapper[5010]: I1126 15:49:45.981318 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 15:49:46 crc kubenswrapper[5010]: I1126 15:49:46.497536 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18c7269a-4239-4da6-8f67-fbcbdc2cf38d","Type":"ContainerStarted","Data":"697863df31aa11c3b3689a39d344db5fd64f99efadba31888058e8d157be83c5"} Nov 26 15:49:46 crc kubenswrapper[5010]: I1126 15:49:46.545031 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.704302067 podStartE2EDuration="7.545001106s" podCreationTimestamp="2025-11-26 15:49:39 +0000 UTC" firstStartedPulling="2025-11-26 15:49:40.510200057 +0000 UTC m=+1401.300917195" lastFinishedPulling="2025-11-26 15:49:43.350899086 +0000 UTC m=+1404.141616234" observedRunningTime="2025-11-26 15:49:46.529401366 +0000 UTC m=+1407.320118524" watchObservedRunningTime="2025-11-26 15:49:46.545001106 +0000 UTC m=+1407.335718254" Nov 26 15:49:46 crc kubenswrapper[5010]: I1126 15:49:46.680363 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:49:46 crc kubenswrapper[5010]: I1126 15:49:46.867157 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 15:49:47 crc kubenswrapper[5010]: I1126 15:49:47.514552 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd","Type":"ContainerStarted","Data":"da3dc1e4dbe15fba67590315abbdb41393dc3b2b5648156ab5c3380328ea7b9c"} Nov 26 15:49:47 crc kubenswrapper[5010]: I1126 15:49:47.529949 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"08acaf58-5c2f-4fb4-8863-846c28f8d016","Type":"ContainerStarted","Data":"3d6c9b333b0c9a1086810ba0aa508a9a33a490bd249f6ba2f2c1da6a468208c4"} Nov 26 15:49:47 crc kubenswrapper[5010]: I1126 15:49:47.536572 5010 generic.go:334] "Generic (PLEG): container finished" podID="647fcd2c-c729-4401-95f8-c38dede33299" containerID="bb62a936b79835b6c73ac07392dd96fd3fc5d2d4ac67dcace4873b04bd1fc9b7" exitCode=0 Nov 26 15:49:47 crc kubenswrapper[5010]: I1126 15:49:47.537685 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/neutron-db-sync-h78d6" event={"ID":"647fcd2c-c729-4401-95f8-c38dede33299","Type":"ContainerDied","Data":"bb62a936b79835b6c73ac07392dd96fd3fc5d2d4ac67dcace4873b04bd1fc9b7"} Nov 26 15:49:48 crc kubenswrapper[5010]: I1126 15:49:48.560225 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd","Type":"ContainerStarted","Data":"7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5"} Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.050735 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-h78d6" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.137536 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-combined-ca-bundle\") pod \"647fcd2c-c729-4401-95f8-c38dede33299\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.137670 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnmqr\" (UniqueName: \"kubernetes.io/projected/647fcd2c-c729-4401-95f8-c38dede33299-kube-api-access-hnmqr\") pod \"647fcd2c-c729-4401-95f8-c38dede33299\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.137719 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-config\") pod \"647fcd2c-c729-4401-95f8-c38dede33299\" (UID: \"647fcd2c-c729-4401-95f8-c38dede33299\") " Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.171349 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/647fcd2c-c729-4401-95f8-c38dede33299-kube-api-access-hnmqr" (OuterVolumeSpecName: "kube-api-access-hnmqr") pod "647fcd2c-c729-4401-95f8-c38dede33299" (UID: "647fcd2c-c729-4401-95f8-c38dede33299"). InnerVolumeSpecName "kube-api-access-hnmqr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.207471 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-config" (OuterVolumeSpecName: "config") pod "647fcd2c-c729-4401-95f8-c38dede33299" (UID: "647fcd2c-c729-4401-95f8-c38dede33299"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.212856 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "647fcd2c-c729-4401-95f8-c38dede33299" (UID: "647fcd2c-c729-4401-95f8-c38dede33299"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.239782 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnmqr\" (UniqueName: \"kubernetes.io/projected/647fcd2c-c729-4401-95f8-c38dede33299-kube-api-access-hnmqr\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.239814 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.239824 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/647fcd2c-c729-4401-95f8-c38dede33299-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.572847 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd","Type":"ContainerStarted","Data":"7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822"} Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.573555 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.581811 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-h78d6" event={"ID":"647fcd2c-c729-4401-95f8-c38dede33299","Type":"ContainerDied","Data":"7e54ff0925a34b3616affb9a7ec28a97e0c4b237ef51dba9d20cf213f636bef1"} Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.582132 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e54ff0925a34b3616affb9a7ec28a97e0c4b237ef51dba9d20cf213f636bef1" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.582051 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-h78d6" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.609233 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.609213836 podStartE2EDuration="4.609213836s" podCreationTimestamp="2025-11-26 15:49:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:49.60218001 +0000 UTC m=+1410.392897148" watchObservedRunningTime="2025-11-26 15:49:49.609213836 +0000 UTC m=+1410.399930984" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.640680 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.821485 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f468f79cc-w6vst"] Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.829912 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerName="dnsmasq-dns" containerID="cri-o://d787974dd62c14ff6c68644045b18c71e3c386b89e87f671ce2be54c1ac3c7a2" gracePeriod=10 Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.834197 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.857215 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7b84d979b9-x2vg9"] Nov 26 15:49:49 crc kubenswrapper[5010]: E1126 15:49:49.857969 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="647fcd2c-c729-4401-95f8-c38dede33299" containerName="neutron-db-sync" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.858063 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="647fcd2c-c729-4401-95f8-c38dede33299" containerName="neutron-db-sync" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.862341 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="647fcd2c-c729-4401-95f8-c38dede33299" containerName="neutron-db-sync" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.863552 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.865147 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.883660 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b84d979b9-x2vg9"] Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.958826 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.959133 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-config\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.959250 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.959360 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-swift-storage-0\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.959473 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scjh8\" (UniqueName: \"kubernetes.io/projected/b4cd6af5-1fd1-450c-b157-009b6c25f21c-kube-api-access-scjh8\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.959571 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-svc\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.962787 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-747455655b-ldrpd"] Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.985410 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.990452 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.990593 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-j5x4k" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.999067 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 15:49:49 crc kubenswrapper[5010]: I1126 15:49:49.999574 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.002614 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-747455655b-ldrpd"] Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.063571 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.070668 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-config\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.070759 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071053 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-config\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071351 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071441 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-ovndb-tls-certs\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071534 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-swift-storage-0\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 
15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071656 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-httpd-config\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071749 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scjh8\" (UniqueName: \"kubernetes.io/projected/b4cd6af5-1fd1-450c-b157-009b6c25f21c-kube-api-access-scjh8\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071817 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-combined-ca-bundle\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.071859 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-svc\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.072014 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l28j5\" (UniqueName: \"kubernetes.io/projected/3c9208c7-3716-48e8-9679-c1bb140259eb-kube-api-access-l28j5\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.072211 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-sb\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.073142 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-swift-storage-0\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.073638 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-nb\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.081104 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-config\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 
15:49:50.084582 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-svc\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.135627 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scjh8\" (UniqueName: \"kubernetes.io/projected/b4cd6af5-1fd1-450c-b157-009b6c25f21c-kube-api-access-scjh8\") pod \"dnsmasq-dns-7b84d979b9-x2vg9\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.174009 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l28j5\" (UniqueName: \"kubernetes.io/projected/3c9208c7-3716-48e8-9679-c1bb140259eb-kube-api-access-l28j5\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.174323 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-config\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.174498 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-ovndb-tls-certs\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.174608 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-httpd-config\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.174732 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-combined-ca-bundle\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.194176 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-config\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.200298 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-combined-ca-bundle\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.202524 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-ovndb-tls-certs\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.214568 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-httpd-config\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.236137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l28j5\" (UniqueName: \"kubernetes.io/projected/3c9208c7-3716-48e8-9679-c1bb140259eb-kube-api-access-l28j5\") pod \"neutron-747455655b-ldrpd\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.299176 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.341989 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.619782 5010 generic.go:334] "Generic (PLEG): container finished" podID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerID="d787974dd62c14ff6c68644045b18c71e3c386b89e87f671ce2be54c1ac3c7a2" exitCode=0 Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.620650 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" event={"ID":"56f52142-3dc2-42d5-bfdf-4453c630b257","Type":"ContainerDied","Data":"d787974dd62c14ff6c68644045b18c71e3c386b89e87f671ce2be54c1ac3c7a2"} Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.738763 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.785061 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.796264 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-config\") pod \"56f52142-3dc2-42d5-bfdf-4453c630b257\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.796381 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-nb\") pod \"56f52142-3dc2-42d5-bfdf-4453c630b257\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.796497 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqkzz\" (UniqueName: \"kubernetes.io/projected/56f52142-3dc2-42d5-bfdf-4453c630b257-kube-api-access-bqkzz\") pod \"56f52142-3dc2-42d5-bfdf-4453c630b257\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.796588 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-swift-storage-0\") pod \"56f52142-3dc2-42d5-bfdf-4453c630b257\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.796624 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-sb\") pod \"56f52142-3dc2-42d5-bfdf-4453c630b257\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.796683 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-svc\") pod \"56f52142-3dc2-42d5-bfdf-4453c630b257\" (UID: \"56f52142-3dc2-42d5-bfdf-4453c630b257\") " Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.804659 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56f52142-3dc2-42d5-bfdf-4453c630b257-kube-api-access-bqkzz" (OuterVolumeSpecName: "kube-api-access-bqkzz") pod "56f52142-3dc2-42d5-bfdf-4453c630b257" (UID: "56f52142-3dc2-42d5-bfdf-4453c630b257"). InnerVolumeSpecName "kube-api-access-bqkzz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.909330 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqkzz\" (UniqueName: \"kubernetes.io/projected/56f52142-3dc2-42d5-bfdf-4453c630b257-kube-api-access-bqkzz\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.921106 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-94c89cc6d-zkrdd"] Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.921429 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-94c89cc6d-zkrdd" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api-log" containerID="cri-o://cdde83ebdc722c4c90f4ab58cc6868315adb9fed02e7f54d20177203d3502628" gracePeriod=30 Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.922841 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-94c89cc6d-zkrdd" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api" containerID="cri-o://de289b713a8647e5a6edd6ea75983aa642bc11037f6380f5ab8fb186a9aefc82" gracePeriod=30 Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.946012 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.946836 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "56f52142-3dc2-42d5-bfdf-4453c630b257" (UID: "56f52142-3dc2-42d5-bfdf-4453c630b257"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.952225 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-config" (OuterVolumeSpecName: "config") pod "56f52142-3dc2-42d5-bfdf-4453c630b257" (UID: "56f52142-3dc2-42d5-bfdf-4453c630b257"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.983512 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "56f52142-3dc2-42d5-bfdf-4453c630b257" (UID: "56f52142-3dc2-42d5-bfdf-4453c630b257"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:50 crc kubenswrapper[5010]: I1126 15:49:50.987125 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "56f52142-3dc2-42d5-bfdf-4453c630b257" (UID: "56f52142-3dc2-42d5-bfdf-4453c630b257"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.012831 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.012914 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.012927 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.012938 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.028070 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "56f52142-3dc2-42d5-bfdf-4453c630b257" (UID: "56f52142-3dc2-42d5-bfdf-4453c630b257"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.044256 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.114994 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/56f52142-3dc2-42d5-bfdf-4453c630b257-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.304069 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7b84d979b9-x2vg9"] Nov 26 15:49:51 crc kubenswrapper[5010]: W1126 15:49:51.327852 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4cd6af5_1fd1_450c_b157_009b6c25f21c.slice/crio-80e2c8578314b28a1542f5b8c39382694a83f68d39d42d4255e9aaab47e70039 WatchSource:0}: Error finding container 80e2c8578314b28a1542f5b8c39382694a83f68d39d42d4255e9aaab47e70039: Status 404 returned error can't find the container with id 80e2c8578314b28a1542f5b8c39382694a83f68d39d42d4255e9aaab47e70039 Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.404983 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-747455655b-ldrpd"] Nov 26 15:49:51 crc kubenswrapper[5010]: W1126 15:49:51.410214 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c9208c7_3716_48e8_9679_c1bb140259eb.slice/crio-c90d59a1cdafcc8abd8f10fe7bd3ef51157602680a2751352c01c73b1a9a8d15 WatchSource:0}: Error finding container c90d59a1cdafcc8abd8f10fe7bd3ef51157602680a2751352c01c73b1a9a8d15: Status 404 returned error can't find the container with id c90d59a1cdafcc8abd8f10fe7bd3ef51157602680a2751352c01c73b1a9a8d15 Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.632295 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.632840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f468f79cc-w6vst" event={"ID":"56f52142-3dc2-42d5-bfdf-4453c630b257","Type":"ContainerDied","Data":"0695a58faf38f30d6cf5da98255ca6e9bc509759db656111e3a24a42de4c836b"} Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.632906 5010 scope.go:117] "RemoveContainer" containerID="d787974dd62c14ff6c68644045b18c71e3c386b89e87f671ce2be54c1ac3c7a2" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.638227 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" event={"ID":"b4cd6af5-1fd1-450c-b157-009b6c25f21c","Type":"ContainerStarted","Data":"80e2c8578314b28a1542f5b8c39382694a83f68d39d42d4255e9aaab47e70039"} Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.639428 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-747455655b-ldrpd" event={"ID":"3c9208c7-3716-48e8-9679-c1bb140259eb","Type":"ContainerStarted","Data":"c90d59a1cdafcc8abd8f10fe7bd3ef51157602680a2751352c01c73b1a9a8d15"} Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.646728 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94c89cc6d-zkrdd" event={"ID":"f1e7fbb2-de0b-4911-b314-db803d9f9d77","Type":"ContainerDied","Data":"cdde83ebdc722c4c90f4ab58cc6868315adb9fed02e7f54d20177203d3502628"} Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.646692 5010 generic.go:334] "Generic (PLEG): container finished" podID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerID="cdde83ebdc722c4c90f4ab58cc6868315adb9fed02e7f54d20177203d3502628" exitCode=143 Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.647084 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="cinder-scheduler" containerID="cri-o://1b52c80569c12e1eea14351e7f52a718a2357ad66408a44bdab6778c4a6edc70" gracePeriod=30 Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.647133 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="probe" containerID="cri-o://697863df31aa11c3b3689a39d344db5fd64f99efadba31888058e8d157be83c5" gracePeriod=30 Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.671238 5010 scope.go:117] "RemoveContainer" containerID="52f3aec60687b126b0f26081e4fbe4521656a4359a4c9d93ab37d66de9b8d2ea" Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.682607 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f468f79cc-w6vst"] Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.693540 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f468f79cc-w6vst"] Nov 26 15:49:51 crc kubenswrapper[5010]: I1126 15:49:51.906811 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" path="/var/lib/kubelet/pods/56f52142-3dc2-42d5-bfdf-4453c630b257/volumes" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.484044 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c9c764c5c-5p8zc"] Nov 26 15:49:52 crc kubenswrapper[5010]: E1126 15:49:52.484490 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" 
containerName="init" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.484518 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerName="init" Nov 26 15:49:52 crc kubenswrapper[5010]: E1126 15:49:52.484558 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerName="dnsmasq-dns" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.484566 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerName="dnsmasq-dns" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.484795 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="56f52142-3dc2-42d5-bfdf-4453c630b257" containerName="dnsmasq-dns" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.486067 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.491670 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.496696 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.502094 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c9c764c5c-5p8zc"] Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.547793 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-ovndb-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.548107 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w9bg\" (UniqueName: \"kubernetes.io/projected/5eee7686-f868-4e9e-bf61-b108eeb88bfa-kube-api-access-8w9bg\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.548437 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-public-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.548550 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-config\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.548649 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-internal-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.548898 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-combined-ca-bundle\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.549037 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-httpd-config\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.651387 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-internal-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.653369 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-combined-ca-bundle\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.654281 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-httpd-config\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.655161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-ovndb-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.655296 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w9bg\" (UniqueName: \"kubernetes.io/projected/5eee7686-f868-4e9e-bf61-b108eeb88bfa-kube-api-access-8w9bg\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.655403 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-public-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.655513 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-config\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.660329 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-internal-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.660574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-public-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.667090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-ovndb-tls-certs\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.667282 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-httpd-config\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.668991 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-combined-ca-bundle\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.669396 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerID="bf4328bbab9dff42cb5204482ada1785957884bbeb2b0075628ff5c5b9b9a32d" exitCode=0 Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.669495 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" event={"ID":"b4cd6af5-1fd1-450c-b157-009b6c25f21c","Type":"ContainerDied","Data":"bf4328bbab9dff42cb5204482ada1785957884bbeb2b0075628ff5c5b9b9a32d"} Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.670846 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-config\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.676208 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w9bg\" (UniqueName: \"kubernetes.io/projected/5eee7686-f868-4e9e-bf61-b108eeb88bfa-kube-api-access-8w9bg\") pod \"neutron-5c9c764c5c-5p8zc\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.681687 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-747455655b-ldrpd" event={"ID":"3c9208c7-3716-48e8-9679-c1bb140259eb","Type":"ContainerStarted","Data":"85ff0b0e3b7dd434e6128d567eeeeec11fd8ac7b2d055a682903e7034e63280c"} Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.681920 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-747455655b-ldrpd" 
event={"ID":"3c9208c7-3716-48e8-9679-c1bb140259eb","Type":"ContainerStarted","Data":"9b495458e612128334395409d752e36730b0d6b39ff6af3ed4daa774634efdd7"} Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.683101 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.737684 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-747455655b-ldrpd" podStartSLOduration=3.737657293 podStartE2EDuration="3.737657293s" podCreationTimestamp="2025-11-26 15:49:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:52.735422727 +0000 UTC m=+1413.526139875" watchObservedRunningTime="2025-11-26 15:49:52.737657293 +0000 UTC m=+1413.528374441" Nov 26 15:49:52 crc kubenswrapper[5010]: I1126 15:49:52.828427 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.502259 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.595088 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.698215 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" event={"ID":"b4cd6af5-1fd1-450c-b157-009b6c25f21c","Type":"ContainerStarted","Data":"21ca72119b84346f83405f5a18e259bae7b196004fd281f29d68a5586931f253"} Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.699374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.703845 5010 generic.go:334] "Generic (PLEG): container finished" podID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerID="697863df31aa11c3b3689a39d344db5fd64f99efadba31888058e8d157be83c5" exitCode=0 Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.705485 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18c7269a-4239-4da6-8f67-fbcbdc2cf38d","Type":"ContainerDied","Data":"697863df31aa11c3b3689a39d344db5fd64f99efadba31888058e8d157be83c5"} Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.710336 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c9c764c5c-5p8zc"] Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.740280 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" podStartSLOduration=4.7402559029999995 podStartE2EDuration="4.740255903s" podCreationTimestamp="2025-11-26 15:49:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:53.730505749 +0000 UTC m=+1414.521222897" watchObservedRunningTime="2025-11-26 15:49:53.740255903 +0000 UTC m=+1414.530973061" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.762622 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xspb2"] Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.855047 5010 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/swift-proxy-6d9f966b7c-7cbw2"] Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.864512 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.867401 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.867821 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.871285 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 26 15:49:53 crc kubenswrapper[5010]: I1126 15:49:53.950286 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6d9f966b7c-7cbw2"] Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.000989 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8m6c\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-kube-api-access-v8m6c\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.001158 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-combined-ca-bundle\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.003617 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-config-data\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.005231 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-etc-swift\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.005277 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-run-httpd\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.005346 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-log-httpd\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.005485 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-internal-tls-certs\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.005535 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-public-tls-certs\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107460 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-log-httpd\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107558 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-internal-tls-certs\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107594 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-public-tls-certs\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107634 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8m6c\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-kube-api-access-v8m6c\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107658 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-combined-ca-bundle\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107683 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-config-data\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107798 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-etc-swift\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.107816 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-run-httpd\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.108330 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-run-httpd\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.108658 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-log-httpd\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.115066 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-etc-swift\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.115778 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-public-tls-certs\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.117459 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-internal-tls-certs\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.118305 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-config-data\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.118410 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-combined-ca-bundle\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.134528 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8m6c\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-kube-api-access-v8m6c\") pod \"swift-proxy-6d9f966b7c-7cbw2\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.197954 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-94c89cc6d-zkrdd" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api-log" probeResult="failure" output="Get 
\"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:43636->10.217.0.159:9311: read: connection reset by peer" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.197977 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-94c89cc6d-zkrdd" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.159:9311/healthcheck\": read tcp 10.217.0.2:43622->10.217.0.159:9311: read: connection reset by peer" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.200680 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.731988 5010 generic.go:334] "Generic (PLEG): container finished" podID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerID="de289b713a8647e5a6edd6ea75983aa642bc11037f6380f5ab8fb186a9aefc82" exitCode=0 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.732508 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94c89cc6d-zkrdd" event={"ID":"f1e7fbb2-de0b-4911-b314-db803d9f9d77","Type":"ContainerDied","Data":"de289b713a8647e5a6edd6ea75983aa642bc11037f6380f5ab8fb186a9aefc82"} Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.734564 5010 generic.go:334] "Generic (PLEG): container finished" podID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerID="1b52c80569c12e1eea14351e7f52a718a2357ad66408a44bdab6778c4a6edc70" exitCode=0 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.734608 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18c7269a-4239-4da6-8f67-fbcbdc2cf38d","Type":"ContainerDied","Data":"1b52c80569c12e1eea14351e7f52a718a2357ad66408a44bdab6778c4a6edc70"} Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.737246 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xspb2" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="registry-server" containerID="cri-o://38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20" gracePeriod=2 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.738627 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9c764c5c-5p8zc" event={"ID":"5eee7686-f868-4e9e-bf61-b108eeb88bfa","Type":"ContainerStarted","Data":"0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd"} Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.741054 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.741084 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9c764c5c-5p8zc" event={"ID":"5eee7686-f868-4e9e-bf61-b108eeb88bfa","Type":"ContainerStarted","Data":"7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b"} Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.741099 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9c764c5c-5p8zc" event={"ID":"5eee7686-f868-4e9e-bf61-b108eeb88bfa","Type":"ContainerStarted","Data":"c6ee01db2fd8acea0588e24b22ccc2540c42999b595eab2cf463d2e2b9ee5e22"} Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.862716 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.903656 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c9c764c5c-5p8zc" podStartSLOduration=2.903628135 podStartE2EDuration="2.903628135s" podCreationTimestamp="2025-11-26 15:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:54.778138556 +0000 UTC m=+1415.568855704" watchObservedRunningTime="2025-11-26 15:49:54.903628135 +0000 UTC m=+1415.694345283" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.933075 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-combined-ca-bundle\") pod \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.933136 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rpns\" (UniqueName: \"kubernetes.io/projected/f1e7fbb2-de0b-4911-b314-db803d9f9d77-kube-api-access-4rpns\") pod \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.933169 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data\") pod \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.942045 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.942410 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-central-agent" containerID="cri-o://f6b75f622de1f2fa6dd717254a3425121af55e3b07e8946351d238dacc57d359" gracePeriod=30 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.943477 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="proxy-httpd" containerID="cri-o://13b66a1f285ddcb948c8ca304aaf0218edb08294e8f905e6f8bdc721290217c4" gracePeriod=30 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.943547 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="sg-core" containerID="cri-o://9ceab6da40a2aaa46658284403150210370998d40158f78dfd12be664ad17edd" gracePeriod=30 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.943582 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-notification-agent" containerID="cri-o://4058ddc142f1ac9afdb6aff8eaa26bcc79c4c88fec2a045d7fc221ba0ddeb1c6" gracePeriod=30 Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.945081 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1e7fbb2-de0b-4911-b314-db803d9f9d77-kube-api-access-4rpns" (OuterVolumeSpecName: "kube-api-access-4rpns") pod 
"f1e7fbb2-de0b-4911-b314-db803d9f9d77" (UID: "f1e7fbb2-de0b-4911-b314-db803d9f9d77"). InnerVolumeSpecName "kube-api-access-4rpns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.978902 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6d9f966b7c-7cbw2"] Nov 26 15:49:54 crc kubenswrapper[5010]: I1126 15:49:54.990591 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1e7fbb2-de0b-4911-b314-db803d9f9d77" (UID: "f1e7fbb2-de0b-4911-b314-db803d9f9d77"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.012897 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data" (OuterVolumeSpecName: "config-data") pod "f1e7fbb2-de0b-4911-b314-db803d9f9d77" (UID: "f1e7fbb2-de0b-4911-b314-db803d9f9d77"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.036817 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e7fbb2-de0b-4911-b314-db803d9f9d77-logs\") pod \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.037220 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data-custom\") pod \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\" (UID: \"f1e7fbb2-de0b-4911-b314-db803d9f9d77\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.040851 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1e7fbb2-de0b-4911-b314-db803d9f9d77-logs" (OuterVolumeSpecName: "logs") pod "f1e7fbb2-de0b-4911-b314-db803d9f9d77" (UID: "f1e7fbb2-de0b-4911-b314-db803d9f9d77"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.041403 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.041431 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rpns\" (UniqueName: \"kubernetes.io/projected/f1e7fbb2-de0b-4911-b314-db803d9f9d77-kube-api-access-4rpns\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.041445 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.058393 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f1e7fbb2-de0b-4911-b314-db803d9f9d77" (UID: "f1e7fbb2-de0b-4911-b314-db803d9f9d77"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.149418 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f1e7fbb2-de0b-4911-b314-db803d9f9d77-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.149687 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1e7fbb2-de0b-4911-b314-db803d9f9d77-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.175574 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.253156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-etc-machine-id\") pod \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.253236 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data-custom\") pod \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.253304 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data\") pod \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.253430 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcs5j\" (UniqueName: \"kubernetes.io/projected/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-kube-api-access-vcs5j\") pod \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.253511 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-combined-ca-bundle\") pod \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.253563 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-scripts\") pod \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\" (UID: \"18c7269a-4239-4da6-8f67-fbcbdc2cf38d\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.257937 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "18c7269a-4239-4da6-8f67-fbcbdc2cf38d" (UID: "18c7269a-4239-4da6-8f67-fbcbdc2cf38d"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.264690 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.304139 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "18c7269a-4239-4da6-8f67-fbcbdc2cf38d" (UID: "18c7269a-4239-4da6-8f67-fbcbdc2cf38d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.304503 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-scripts" (OuterVolumeSpecName: "scripts") pod "18c7269a-4239-4da6-8f67-fbcbdc2cf38d" (UID: "18c7269a-4239-4da6-8f67-fbcbdc2cf38d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.319641 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-kube-api-access-vcs5j" (OuterVolumeSpecName: "kube-api-access-vcs5j") pod "18c7269a-4239-4da6-8f67-fbcbdc2cf38d" (UID: "18c7269a-4239-4da6-8f67-fbcbdc2cf38d"). InnerVolumeSpecName "kube-api-access-vcs5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.331283 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.155:3000/\": read tcp 10.217.0.2:48390->10.217.0.155:3000: read: connection reset by peer" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.369872 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18c7269a-4239-4da6-8f67-fbcbdc2cf38d" (UID: "18c7269a-4239-4da6-8f67-fbcbdc2cf38d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.372231 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.372274 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.372287 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.372298 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcs5j\" (UniqueName: \"kubernetes.io/projected/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-kube-api-access-vcs5j\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.437300 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.559898 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data" (OuterVolumeSpecName: "config-data") pod "18c7269a-4239-4da6-8f67-fbcbdc2cf38d" (UID: "18c7269a-4239-4da6-8f67-fbcbdc2cf38d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.576678 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-utilities\") pod \"cb11973b-5bc3-4f90-979d-921d29b03c0a\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.576776 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vc2bd\" (UniqueName: \"kubernetes.io/projected/cb11973b-5bc3-4f90-979d-921d29b03c0a-kube-api-access-vc2bd\") pod \"cb11973b-5bc3-4f90-979d-921d29b03c0a\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.577395 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-catalog-content\") pod \"cb11973b-5bc3-4f90-979d-921d29b03c0a\" (UID: \"cb11973b-5bc3-4f90-979d-921d29b03c0a\") " Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.577828 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-utilities" (OuterVolumeSpecName: "utilities") pod "cb11973b-5bc3-4f90-979d-921d29b03c0a" (UID: "cb11973b-5bc3-4f90-979d-921d29b03c0a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.578523 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18c7269a-4239-4da6-8f67-fbcbdc2cf38d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.578538 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.584338 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb11973b-5bc3-4f90-979d-921d29b03c0a-kube-api-access-vc2bd" (OuterVolumeSpecName: "kube-api-access-vc2bd") pod "cb11973b-5bc3-4f90-979d-921d29b03c0a" (UID: "cb11973b-5bc3-4f90-979d-921d29b03c0a"). InnerVolumeSpecName "kube-api-access-vc2bd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.682645 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vc2bd\" (UniqueName: \"kubernetes.io/projected/cb11973b-5bc3-4f90-979d-921d29b03c0a-kube-api-access-vc2bd\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.740327 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb11973b-5bc3-4f90-979d-921d29b03c0a" (UID: "cb11973b-5bc3-4f90-979d-921d29b03c0a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.760192 5010 generic.go:334] "Generic (PLEG): container finished" podID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerID="13b66a1f285ddcb948c8ca304aaf0218edb08294e8f905e6f8bdc721290217c4" exitCode=0 Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.760456 5010 generic.go:334] "Generic (PLEG): container finished" podID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerID="9ceab6da40a2aaa46658284403150210370998d40158f78dfd12be664ad17edd" exitCode=2 Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.760535 5010 generic.go:334] "Generic (PLEG): container finished" podID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerID="f6b75f622de1f2fa6dd717254a3425121af55e3b07e8946351d238dacc57d359" exitCode=0 Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.760252 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerDied","Data":"13b66a1f285ddcb948c8ca304aaf0218edb08294e8f905e6f8bdc721290217c4"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.760751 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerDied","Data":"9ceab6da40a2aaa46658284403150210370998d40158f78dfd12be664ad17edd"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.760784 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerDied","Data":"f6b75f622de1f2fa6dd717254a3425121af55e3b07e8946351d238dacc57d359"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.766577 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerID="38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20" exitCode=0 Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.766748 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerDied","Data":"38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.766853 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xspb2" event={"ID":"cb11973b-5bc3-4f90-979d-921d29b03c0a","Type":"ContainerDied","Data":"3d89efe2146dc9c76f1094f848e25c1ebafde81281415c39394a3b16bb05b10b"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.766925 5010 scope.go:117] "RemoveContainer" containerID="38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.767184 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xspb2" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.780115 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-94c89cc6d-zkrdd" event={"ID":"f1e7fbb2-de0b-4911-b314-db803d9f9d77","Type":"ContainerDied","Data":"7f07518e931a80021a6f54275ea0216e4acacab629c6372ef99aa37b1becc506"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.780261 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-94c89cc6d-zkrdd" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.784172 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb11973b-5bc3-4f90-979d-921d29b03c0a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.789274 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"18c7269a-4239-4da6-8f67-fbcbdc2cf38d","Type":"ContainerDied","Data":"74773908643a09997c98822011ce3b12945d31ccad422f694ac07acb476a6018"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.789314 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.805809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" event={"ID":"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc","Type":"ContainerStarted","Data":"4248baa6552f5c6e89014c848cb17cfc27c39511119002d3f8686be69996f6fe"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.805903 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" event={"ID":"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc","Type":"ContainerStarted","Data":"be5084aac0c347562d904640aff0d31ca4f6e32ee6731847a4cbf3c42b295f02"} Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.805997 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.806131 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.812033 5010 scope.go:117] "RemoveContainer" containerID="2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.836603 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xspb2"] Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.857777 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xspb2"] Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.864791 5010 scope.go:117] "RemoveContainer" containerID="fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.867156 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" podStartSLOduration=2.867131807 podStartE2EDuration="2.867131807s" podCreationTimestamp="2025-11-26 15:49:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:55.83568365 +0000 UTC m=+1416.626400798" watchObservedRunningTime="2025-11-26 15:49:55.867131807 +0000 UTC m=+1416.657848955" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.912124 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" path="/var/lib/kubelet/pods/cb11973b-5bc3-4f90-979d-921d29b03c0a/volumes" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.913253 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-94c89cc6d-zkrdd"] Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.913934 5010 scope.go:117] "RemoveContainer" containerID="38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20" Nov 26 15:49:55 crc kubenswrapper[5010]: E1126 15:49:55.917828 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20\": container with ID starting with 38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20 not found: ID does not exist" containerID="38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.917957 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20"} err="failed to get container status \"38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20\": rpc error: code = NotFound desc = could not find container \"38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20\": container with ID starting with 38e99bfed9a8ca1261a209b965471332b2290fb5f47f2c50ef1b4cc72fcccd20 not found: ID does not exist" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.918060 5010 scope.go:117] "RemoveContainer" containerID="2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.930650 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-94c89cc6d-zkrdd"] Nov 26 15:49:55 crc kubenswrapper[5010]: E1126 15:49:55.930651 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113\": container with ID starting with 2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113 not found: ID does not exist" containerID="2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.930768 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113"} err="failed to get container status \"2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113\": rpc error: code = NotFound desc = could not find container \"2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113\": container with ID starting with 2f6386ba46d60f60906ce8c25a88ac65a2b7f7384e0c194b53a437537dc14113 not found: ID does not exist" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.930811 5010 scope.go:117] "RemoveContainer" containerID="fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d" Nov 26 15:49:55 crc kubenswrapper[5010]: E1126 15:49:55.941948 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d\": container with ID starting with fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d not found: ID does not exist" containerID="fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.942268 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d"} err="failed to get container status \"fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d\": rpc error: code = NotFound desc = could not find container \"fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d\": container with ID starting with fa07e8614979879c722d986edb50e188e0ce31107713b08d7f6ebe97a33bec0d not found: ID does not exist" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.942431 5010 scope.go:117] "RemoveContainer" containerID="de289b713a8647e5a6edd6ea75983aa642bc11037f6380f5ab8fb186a9aefc82" Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.965267 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:55 crc kubenswrapper[5010]: I1126 15:49:55.986771 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.010870 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011405 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011429 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api" Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011440 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="cinder-scheduler" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011448 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="cinder-scheduler" Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011482 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="registry-server" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011490 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="registry-server" Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011500 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="probe" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011507 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="probe" Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011519 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="extract-content" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011525 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="extract-content" Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011534 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="extract-utilities" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011542 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="extract-utilities" Nov 26 15:49:56 crc kubenswrapper[5010]: E1126 15:49:56.011557 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api-log" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011565 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api-log" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011801 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="cinder-scheduler" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011817 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" containerName="barbican-api-log" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011830 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" 
containerName="barbican-api" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011844 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb11973b-5bc3-4f90-979d-921d29b03c0a" containerName="registry-server" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.011855 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" containerName="probe" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.012957 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.020426 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.032606 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.098094 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-scripts\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.098175 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.098214 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0180fc92-954c-4857-9caf-4b4e5ca0c214-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.098260 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqsbd\" (UniqueName: \"kubernetes.io/projected/0180fc92-954c-4857-9caf-4b4e5ca0c214-kube-api-access-sqsbd\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.098302 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.098326 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.130943 5010 scope.go:117] "RemoveContainer" containerID="cdde83ebdc722c4c90f4ab58cc6868315adb9fed02e7f54d20177203d3502628" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.202473 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-scripts\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.202864 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.202957 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0180fc92-954c-4857-9caf-4b4e5ca0c214-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.203034 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqsbd\" (UniqueName: \"kubernetes.io/projected/0180fc92-954c-4857-9caf-4b4e5ca0c214-kube-api-access-sqsbd\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.203159 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.203251 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.204442 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0180fc92-954c-4857-9caf-4b4e5ca0c214-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.209401 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-scripts\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.216016 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.216970 5010 scope.go:117] "RemoveContainer" containerID="697863df31aa11c3b3689a39d344db5fd64f99efadba31888058e8d157be83c5" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.218135 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.221535 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.240231 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqsbd\" (UniqueName: \"kubernetes.io/projected/0180fc92-954c-4857-9caf-4b4e5ca0c214-kube-api-access-sqsbd\") pod \"cinder-scheduler-0\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.317035 5010 scope.go:117] "RemoveContainer" containerID="1b52c80569c12e1eea14351e7f52a718a2357ad66408a44bdab6778c4a6edc70" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.412995 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.672415 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.155:3000/\": dial tcp 10.217.0.155:3000: connect: connection refused" Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.847103 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" event={"ID":"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc","Type":"ContainerStarted","Data":"b34d4f722a91c454494472df034a2abe16ed2231c30fd284d1678d1f21a2a6d7"} Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.863331 5010 generic.go:334] "Generic (PLEG): container finished" podID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerID="4058ddc142f1ac9afdb6aff8eaa26bcc79c4c88fec2a045d7fc221ba0ddeb1c6" exitCode=0 Nov 26 15:49:56 crc kubenswrapper[5010]: I1126 15:49:56.863457 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerDied","Data":"4058ddc142f1ac9afdb6aff8eaa26bcc79c4c88fec2a045d7fc221ba0ddeb1c6"} Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.022023 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.231461 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.378959 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-config-data\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.379038 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-combined-ca-bundle\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.379093 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-run-httpd\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.379284 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-log-httpd\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.379329 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-sg-core-conf-yaml\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.379353 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xwxps\" (UniqueName: \"kubernetes.io/projected/d6950462-52b0-49ad-b85d-d2372ff22aa8-kube-api-access-xwxps\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.379439 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-scripts\") pod \"d6950462-52b0-49ad-b85d-d2372ff22aa8\" (UID: \"d6950462-52b0-49ad-b85d-d2372ff22aa8\") " Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.380395 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.380638 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.391066 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6950462-52b0-49ad-b85d-d2372ff22aa8-kube-api-access-xwxps" (OuterVolumeSpecName: "kube-api-access-xwxps") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "kube-api-access-xwxps". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.392007 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-scripts" (OuterVolumeSpecName: "scripts") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.492501 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.492534 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/d6950462-52b0-49ad-b85d-d2372ff22aa8-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.492544 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xwxps\" (UniqueName: \"kubernetes.io/projected/d6950462-52b0-49ad-b85d-d2372ff22aa8-kube-api-access-xwxps\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.492553 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.493877 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.536088 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.562176 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-config-data" (OuterVolumeSpecName: "config-data") pod "d6950462-52b0-49ad-b85d-d2372ff22aa8" (UID: "d6950462-52b0-49ad-b85d-d2372ff22aa8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.594541 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.594836 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.594914 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6950462-52b0-49ad-b85d-d2372ff22aa8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.907084 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18c7269a-4239-4da6-8f67-fbcbdc2cf38d" path="/var/lib/kubelet/pods/18c7269a-4239-4da6-8f67-fbcbdc2cf38d/volumes" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.907909 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1e7fbb2-de0b-4911-b314-db803d9f9d77" path="/var/lib/kubelet/pods/f1e7fbb2-de0b-4911-b314-db803d9f9d77/volumes" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.908646 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0180fc92-954c-4857-9caf-4b4e5ca0c214","Type":"ContainerStarted","Data":"8f282fec68d3dec5399eeba24be7a5eff6434de0c82f418a5de8ca79565c4637"} Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.923874 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.925447 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"d6950462-52b0-49ad-b85d-d2372ff22aa8","Type":"ContainerDied","Data":"bd5c6e42f45fef7f009660f5236398d5c09c05f8e84ac2f691045ba8742078af"} Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.940799 5010 scope.go:117] "RemoveContainer" containerID="13b66a1f285ddcb948c8ca304aaf0218edb08294e8f905e6f8bdc721290217c4" Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.966117 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:57 crc kubenswrapper[5010]: I1126 15:49:57.990824 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.011620 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:58 crc kubenswrapper[5010]: E1126 15:49:58.012064 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-notification-agent" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012082 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-notification-agent" Nov 26 15:49:58 crc kubenswrapper[5010]: E1126 15:49:58.012110 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-central-agent" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012118 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-central-agent" Nov 26 15:49:58 crc kubenswrapper[5010]: E1126 15:49:58.012148 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="sg-core" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012157 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="sg-core" Nov 26 15:49:58 crc kubenswrapper[5010]: E1126 15:49:58.012181 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="proxy-httpd" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012187 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="proxy-httpd" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012372 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-central-agent" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012390 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="sg-core" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012398 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="proxy-httpd" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.012410 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" containerName="ceilometer-notification-agent" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.014059 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.026193 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.052054 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.052049 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.080007 5010 scope.go:117] "RemoveContainer" containerID="9ceab6da40a2aaa46658284403150210370998d40158f78dfd12be664ad17edd" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.118622 5010 scope.go:117] "RemoveContainer" containerID="4058ddc142f1ac9afdb6aff8eaa26bcc79c4c88fec2a045d7fc221ba0ddeb1c6" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148060 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-log-httpd\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148118 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148173 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148236 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-config-data\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148254 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-run-httpd\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148326 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-scripts\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.148355 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7v4q\" (UniqueName: \"kubernetes.io/projected/eb76651e-5f57-4dee-904f-416ebe4c8306-kube-api-access-c7v4q\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 
15:49:58.247446 5010 scope.go:117] "RemoveContainer" containerID="f6b75f622de1f2fa6dd717254a3425121af55e3b07e8946351d238dacc57d359" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.249844 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-log-httpd\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.249881 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.249915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.249956 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-config-data\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.249971 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-run-httpd\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.250019 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-scripts\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.250038 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7v4q\" (UniqueName: \"kubernetes.io/projected/eb76651e-5f57-4dee-904f-416ebe4c8306-kube-api-access-c7v4q\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.250675 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-log-httpd\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.269110 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-run-httpd\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.271332 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-scripts\") pod \"ceilometer-0\" (UID: 
\"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.272168 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7v4q\" (UniqueName: \"kubernetes.io/projected/eb76651e-5f57-4dee-904f-416ebe4c8306-kube-api-access-c7v4q\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.274400 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-config-data\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.274678 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.289200 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.381782 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.764381 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 15:49:58 crc kubenswrapper[5010]: I1126 15:49:58.882899 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:49:59 crc kubenswrapper[5010]: I1126 15:49:59.000000 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0180fc92-954c-4857-9caf-4b4e5ca0c214","Type":"ContainerStarted","Data":"481fb5fc62e4c26b123d374b9f9cf253a34458e076080fc635b7a5858df4367e"} Nov 26 15:49:59 crc kubenswrapper[5010]: I1126 15:49:59.000315 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0180fc92-954c-4857-9caf-4b4e5ca0c214","Type":"ContainerStarted","Data":"9d2743e44261566a3d1c21cfb428ead84b6cb4a37eb99da6c1bacd18a1287645"} Nov 26 15:49:59 crc kubenswrapper[5010]: I1126 15:49:59.003828 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerStarted","Data":"6d5e809396628aad71d305907db302423eeb6123f255cc88b79ea807aae00823"} Nov 26 15:49:59 crc kubenswrapper[5010]: I1126 15:49:59.041129 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.041112923 podStartE2EDuration="4.041112923s" podCreationTimestamp="2025-11-26 15:49:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:49:59.039090973 +0000 UTC m=+1419.829808121" watchObservedRunningTime="2025-11-26 15:49:59.041112923 +0000 UTC m=+1419.831830071" Nov 26 15:49:59 crc kubenswrapper[5010]: I1126 15:49:59.906146 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="d6950462-52b0-49ad-b85d-d2372ff22aa8" path="/var/lib/kubelet/pods/d6950462-52b0-49ad-b85d-d2372ff22aa8/volumes" Nov 26 15:50:00 crc kubenswrapper[5010]: I1126 15:50:00.132178 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:00 crc kubenswrapper[5010]: I1126 15:50:00.303904 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:50:00 crc kubenswrapper[5010]: I1126 15:50:00.367292 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-667fd8655c-pp657"] Nov 26 15:50:00 crc kubenswrapper[5010]: I1126 15:50:00.367565 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-667fd8655c-pp657" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="dnsmasq-dns" containerID="cri-o://029c55062d788ba70d12a95d7deb331ec9ef320512ee7519131e8ba85e747280" gracePeriod=10 Nov 26 15:50:01 crc kubenswrapper[5010]: I1126 15:50:01.026536 5010 generic.go:334] "Generic (PLEG): container finished" podID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerID="029c55062d788ba70d12a95d7deb331ec9ef320512ee7519131e8ba85e747280" exitCode=0 Nov 26 15:50:01 crc kubenswrapper[5010]: I1126 15:50:01.026611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667fd8655c-pp657" event={"ID":"eccd03a4-6d90-40d9-b371-7e6737f11862","Type":"ContainerDied","Data":"029c55062d788ba70d12a95d7deb331ec9ef320512ee7519131e8ba85e747280"} Nov 26 15:50:01 crc kubenswrapper[5010]: I1126 15:50:01.415282 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 15:50:01 crc kubenswrapper[5010]: I1126 15:50:01.998678 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-667fd8655c-pp657" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 26 15:50:04 crc kubenswrapper[5010]: I1126 15:50:04.207596 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:50:04 crc kubenswrapper[5010]: I1126 15:50:04.216860 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:50:06 crc kubenswrapper[5010]: I1126 15:50:06.685387 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 15:50:06 crc kubenswrapper[5010]: I1126 15:50:06.998716 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-667fd8655c-pp657" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.147:5353: connect: connection refused" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.429671 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.607665 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-swift-storage-0\") pod \"eccd03a4-6d90-40d9-b371-7e6737f11862\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.607790 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-svc\") pod \"eccd03a4-6d90-40d9-b371-7e6737f11862\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.607894 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-sb\") pod \"eccd03a4-6d90-40d9-b371-7e6737f11862\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.607942 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkff9\" (UniqueName: \"kubernetes.io/projected/eccd03a4-6d90-40d9-b371-7e6737f11862-kube-api-access-hkff9\") pod \"eccd03a4-6d90-40d9-b371-7e6737f11862\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.608085 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-nb\") pod \"eccd03a4-6d90-40d9-b371-7e6737f11862\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.608122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-config\") pod \"eccd03a4-6d90-40d9-b371-7e6737f11862\" (UID: \"eccd03a4-6d90-40d9-b371-7e6737f11862\") " Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.615643 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eccd03a4-6d90-40d9-b371-7e6737f11862-kube-api-access-hkff9" (OuterVolumeSpecName: "kube-api-access-hkff9") pod "eccd03a4-6d90-40d9-b371-7e6737f11862" (UID: "eccd03a4-6d90-40d9-b371-7e6737f11862"). InnerVolumeSpecName "kube-api-access-hkff9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.666481 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eccd03a4-6d90-40d9-b371-7e6737f11862" (UID: "eccd03a4-6d90-40d9-b371-7e6737f11862"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.672024 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eccd03a4-6d90-40d9-b371-7e6737f11862" (UID: "eccd03a4-6d90-40d9-b371-7e6737f11862"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.675025 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eccd03a4-6d90-40d9-b371-7e6737f11862" (UID: "eccd03a4-6d90-40d9-b371-7e6737f11862"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.680120 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "eccd03a4-6d90-40d9-b371-7e6737f11862" (UID: "eccd03a4-6d90-40d9-b371-7e6737f11862"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.687842 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-config" (OuterVolumeSpecName: "config") pod "eccd03a4-6d90-40d9-b371-7e6737f11862" (UID: "eccd03a4-6d90-40d9-b371-7e6737f11862"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.710669 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.710738 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.710752 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.710765 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.710776 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eccd03a4-6d90-40d9-b371-7e6737f11862-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:09 crc kubenswrapper[5010]: I1126 15:50:09.710790 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkff9\" (UniqueName: \"kubernetes.io/projected/eccd03a4-6d90-40d9-b371-7e6737f11862-kube-api-access-hkff9\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.114390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"08acaf58-5c2f-4fb4-8863-846c28f8d016","Type":"ContainerStarted","Data":"e30a58057f8e14429694a2b07ec64cfe7a7ea07313dd194b06c05df065dded6f"} Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.116094 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerStarted","Data":"8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0"} Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.118197 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-667fd8655c-pp657" event={"ID":"eccd03a4-6d90-40d9-b371-7e6737f11862","Type":"ContainerDied","Data":"e4dab88cc5becab8887ad535dbf15caa6ac210319320389600d3dba9897e163c"} Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.118258 5010 scope.go:117] "RemoveContainer" containerID="029c55062d788ba70d12a95d7deb331ec9ef320512ee7519131e8ba85e747280" Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.118322 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-667fd8655c-pp657" Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.140812 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.901299374 podStartE2EDuration="25.14078615s" podCreationTimestamp="2025-11-26 15:49:45 +0000 UTC" firstStartedPulling="2025-11-26 15:49:46.909921324 +0000 UTC m=+1407.700638472" lastFinishedPulling="2025-11-26 15:50:09.14940811 +0000 UTC m=+1429.940125248" observedRunningTime="2025-11-26 15:50:10.130030671 +0000 UTC m=+1430.920747819" watchObservedRunningTime="2025-11-26 15:50:10.14078615 +0000 UTC m=+1430.931503298" Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.190827 5010 scope.go:117] "RemoveContainer" containerID="2d4cda749f7341b684514100f0579125b1a945f21417bca957f98a5ebaeddb10" Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.202954 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-667fd8655c-pp657"] Nov 26 15:50:10 crc kubenswrapper[5010]: I1126 15:50:10.210482 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-667fd8655c-pp657"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.135010 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerStarted","Data":"89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9"} Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.135337 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerStarted","Data":"d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f"} Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.373923 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-q5wnb"] Nov 26 15:50:11 crc kubenswrapper[5010]: E1126 15:50:11.375415 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="init" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.375805 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="init" Nov 26 15:50:11 crc kubenswrapper[5010]: E1126 15:50:11.375854 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="dnsmasq-dns" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.375864 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="dnsmasq-dns" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.376135 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" containerName="dnsmasq-dns" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.376988 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.391667 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-q5wnb"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.499391 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-fj5jk"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.501050 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.509870 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-fj5jk"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.549302 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-operator-scripts\") pod \"nova-api-db-create-q5wnb\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.549651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjf7s\" (UniqueName: \"kubernetes.io/projected/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-kube-api-access-fjf7s\") pod \"nova-api-db-create-q5wnb\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.582099 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f128-account-create-update-xklvh"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.583572 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.585638 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.588910 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f128-account-create-update-xklvh"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.652607 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-operator-scripts\") pod \"nova-api-db-create-q5wnb\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.652781 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjf7s\" (UniqueName: \"kubernetes.io/projected/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-kube-api-access-fjf7s\") pod \"nova-api-db-create-q5wnb\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.652867 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc74f571-aa5a-4030-800c-2945c869fdd5-operator-scripts\") pod \"nova-cell0-db-create-fj5jk\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.652903 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xf5r7\" (UniqueName: \"kubernetes.io/projected/fc74f571-aa5a-4030-800c-2945c869fdd5-kube-api-access-xf5r7\") pod \"nova-cell0-db-create-fj5jk\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.653649 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-operator-scripts\") pod \"nova-api-db-create-q5wnb\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.675680 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-knxrp"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.676977 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.687459 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-knxrp"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.690504 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjf7s\" (UniqueName: \"kubernetes.io/projected/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-kube-api-access-fjf7s\") pod \"nova-api-db-create-q5wnb\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.698896 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.755965 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc74f571-aa5a-4030-800c-2945c869fdd5-operator-scripts\") pod \"nova-cell0-db-create-fj5jk\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.756024 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnvrg\" (UniqueName: \"kubernetes.io/projected/7f3aae36-d899-446c-9cf0-9ee7c7218c98-kube-api-access-tnvrg\") pod \"nova-api-f128-account-create-update-xklvh\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.756057 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xf5r7\" (UniqueName: \"kubernetes.io/projected/fc74f571-aa5a-4030-800c-2945c869fdd5-kube-api-access-xf5r7\") pod \"nova-cell0-db-create-fj5jk\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.756096 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f3aae36-d899-446c-9cf0-9ee7c7218c98-operator-scripts\") pod \"nova-api-f128-account-create-update-xklvh\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.757220 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc74f571-aa5a-4030-800c-2945c869fdd5-operator-scripts\") pod \"nova-cell0-db-create-fj5jk\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.785491 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xf5r7\" (UniqueName: \"kubernetes.io/projected/fc74f571-aa5a-4030-800c-2945c869fdd5-kube-api-access-xf5r7\") pod \"nova-cell0-db-create-fj5jk\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.792156 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-e911-account-create-update-x6wmv"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.793477 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.796788 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.805507 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e911-account-create-update-x6wmv"] Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.832471 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.858726 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-operator-scripts\") pod \"nova-cell1-db-create-knxrp\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.858809 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnvrg\" (UniqueName: \"kubernetes.io/projected/7f3aae36-d899-446c-9cf0-9ee7c7218c98-kube-api-access-tnvrg\") pod \"nova-api-f128-account-create-update-xklvh\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.858918 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f3aae36-d899-446c-9cf0-9ee7c7218c98-operator-scripts\") pod \"nova-api-f128-account-create-update-xklvh\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.859043 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfb22\" (UniqueName: \"kubernetes.io/projected/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-kube-api-access-mfb22\") pod \"nova-cell1-db-create-knxrp\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.859918 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f3aae36-d899-446c-9cf0-9ee7c7218c98-operator-scripts\") pod \"nova-api-f128-account-create-update-xklvh\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.905464 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnvrg\" (UniqueName: \"kubernetes.io/projected/7f3aae36-d899-446c-9cf0-9ee7c7218c98-kube-api-access-tnvrg\") pod \"nova-api-f128-account-create-update-xklvh\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.944611 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eccd03a4-6d90-40d9-b371-7e6737f11862" path="/var/lib/kubelet/pods/eccd03a4-6d90-40d9-b371-7e6737f11862/volumes" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.961099 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfb22\" (UniqueName: \"kubernetes.io/projected/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-kube-api-access-mfb22\") pod \"nova-cell1-db-create-knxrp\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.961186 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb0e3931-24cf-4410-98c7-74cba52c93ae-operator-scripts\") pod \"nova-cell0-e911-account-create-update-x6wmv\" 
(UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.965094 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zg8q\" (UniqueName: \"kubernetes.io/projected/bb0e3931-24cf-4410-98c7-74cba52c93ae-kube-api-access-8zg8q\") pod \"nova-cell0-e911-account-create-update-x6wmv\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.965234 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-operator-scripts\") pod \"nova-cell1-db-create-knxrp\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:11 crc kubenswrapper[5010]: I1126 15:50:11.966340 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-operator-scripts\") pod \"nova-cell1-db-create-knxrp\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.001536 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-b07e-account-create-update-wpwwt"] Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.013977 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b07e-account-create-update-wpwwt"] Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.014069 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.045364 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfb22\" (UniqueName: \"kubernetes.io/projected/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-kube-api-access-mfb22\") pod \"nova-cell1-db-create-knxrp\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.047296 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.072321 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb0e3931-24cf-4410-98c7-74cba52c93ae-operator-scripts\") pod \"nova-cell0-e911-account-create-update-x6wmv\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.072376 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zg8q\" (UniqueName: \"kubernetes.io/projected/bb0e3931-24cf-4410-98c7-74cba52c93ae-kube-api-access-8zg8q\") pod \"nova-cell0-e911-account-create-update-x6wmv\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.074030 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb0e3931-24cf-4410-98c7-74cba52c93ae-operator-scripts\") pod \"nova-cell0-e911-account-create-update-x6wmv\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.103915 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zg8q\" (UniqueName: \"kubernetes.io/projected/bb0e3931-24cf-4410-98c7-74cba52c93ae-kube-api-access-8zg8q\") pod \"nova-cell0-e911-account-create-update-x6wmv\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.181334 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-operator-scripts\") pod \"nova-cell1-b07e-account-create-update-wpwwt\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.181467 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7s2g\" (UniqueName: \"kubernetes.io/projected/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-kube-api-access-t7s2g\") pod \"nova-cell1-b07e-account-create-update-wpwwt\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.182168 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.204079 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.283263 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7s2g\" (UniqueName: \"kubernetes.io/projected/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-kube-api-access-t7s2g\") pod \"nova-cell1-b07e-account-create-update-wpwwt\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.283701 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-operator-scripts\") pod \"nova-cell1-b07e-account-create-update-wpwwt\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.284735 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-operator-scripts\") pod \"nova-cell1-b07e-account-create-update-wpwwt\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.303248 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7s2g\" (UniqueName: \"kubernetes.io/projected/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-kube-api-access-t7s2g\") pod \"nova-cell1-b07e-account-create-update-wpwwt\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.356674 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.398847 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.474095 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-q5wnb"] Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.558836 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-fj5jk"] Nov 26 15:50:12 crc kubenswrapper[5010]: W1126 15:50:12.590115 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfc74f571_aa5a_4030_800c_2945c869fdd5.slice/crio-3d0f41a13c8006627cdd60fb0be16f7df4eae262b840dd3d04b59d430c736ebd WatchSource:0}: Error finding container 3d0f41a13c8006627cdd60fb0be16f7df4eae262b840dd3d04b59d430c736ebd: Status 404 returned error can't find the container with id 3d0f41a13c8006627cdd60fb0be16f7df4eae262b840dd3d04b59d430c736ebd Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.705901 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f128-account-create-update-xklvh"] Nov 26 15:50:12 crc kubenswrapper[5010]: I1126 15:50:12.819261 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-e911-account-create-update-x6wmv"] Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.108465 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b07e-account-create-update-wpwwt"] Nov 26 15:50:13 crc kubenswrapper[5010]: W1126 15:50:13.114328 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0756834c_f4fd_4aaa_b3c9_d00fad779b4b.slice/crio-239963e232d3eaf4e90251dbfb7afec15f405ce4495388a6207e35c59a032211 WatchSource:0}: Error finding container 239963e232d3eaf4e90251dbfb7afec15f405ce4495388a6207e35c59a032211: Status 404 returned error can't find the container with id 239963e232d3eaf4e90251dbfb7afec15f405ce4495388a6207e35c59a032211 Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.227788 5010 generic.go:334] "Generic (PLEG): container finished" podID="7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" containerID="0cfd653091e91e0ee452317bab6a6a58cc99cd5b9eff404b2e0356747e85fbfa" exitCode=0 Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.228670 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q5wnb" event={"ID":"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4","Type":"ContainerDied","Data":"0cfd653091e91e0ee452317bab6a6a58cc99cd5b9eff404b2e0356747e85fbfa"} Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.228699 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q5wnb" event={"ID":"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4","Type":"ContainerStarted","Data":"7be3a3edcecce432e365452a952e555017f0dcc1a3e5effc74dc2fc3d0375d5d"} Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.245975 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f128-account-create-update-xklvh" event={"ID":"7f3aae36-d899-446c-9cf0-9ee7c7218c98","Type":"ContainerStarted","Data":"ff9af31005160b79dc732a86de3fd2a28d6884297c416262f3e5bf759795690f"} Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.254808 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" 
event={"ID":"bb0e3931-24cf-4410-98c7-74cba52c93ae","Type":"ContainerStarted","Data":"c2fe5488c3566ce92fdc5fe705cbea60fac86363cd0ca37f793309aa904cac08"} Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.257407 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fj5jk" event={"ID":"fc74f571-aa5a-4030-800c-2945c869fdd5","Type":"ContainerStarted","Data":"3d0f41a13c8006627cdd60fb0be16f7df4eae262b840dd3d04b59d430c736ebd"} Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.260440 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" event={"ID":"0756834c-f4fd-4aaa-b3c9-d00fad779b4b","Type":"ContainerStarted","Data":"239963e232d3eaf4e90251dbfb7afec15f405ce4495388a6207e35c59a032211"} Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.270935 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-knxrp"] Nov 26 15:50:13 crc kubenswrapper[5010]: W1126 15:50:13.279567 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7432b1a0_ae0e_4db9_8295_cc11d1d657e7.slice/crio-3b1d8125b4b8bcaf7f77bd14190bf9f2c48d717b6d121e3db1193fc4cbea3459 WatchSource:0}: Error finding container 3b1d8125b4b8bcaf7f77bd14190bf9f2c48d717b6d121e3db1193fc4cbea3459: Status 404 returned error can't find the container with id 3b1d8125b4b8bcaf7f77bd14190bf9f2c48d717b6d121e3db1193fc4cbea3459 Nov 26 15:50:13 crc kubenswrapper[5010]: I1126 15:50:13.304521 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-fj5jk" podStartSLOduration=2.304494729 podStartE2EDuration="2.304494729s" podCreationTimestamp="2025-11-26 15:50:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:50:13.278436107 +0000 UTC m=+1434.069153255" watchObservedRunningTime="2025-11-26 15:50:13.304494729 +0000 UTC m=+1434.095211887" Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.280772 5010 generic.go:334] "Generic (PLEG): container finished" podID="7f3aae36-d899-446c-9cf0-9ee7c7218c98" containerID="8e37e04d466ca260e3f8f7ff81def050743b2ca8bf2294353ecc4ab35c6e4e5f" exitCode=0 Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.281314 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f128-account-create-update-xklvh" event={"ID":"7f3aae36-d899-446c-9cf0-9ee7c7218c98","Type":"ContainerDied","Data":"8e37e04d466ca260e3f8f7ff81def050743b2ca8bf2294353ecc4ab35c6e4e5f"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.284687 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-knxrp" event={"ID":"7432b1a0-ae0e-4db9-8295-cc11d1d657e7","Type":"ContainerStarted","Data":"9e380480e0ed87dc131e2bebbe255dfbe9ddfc2684fa87e255d79aa89c5c350c"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.284736 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-knxrp" event={"ID":"7432b1a0-ae0e-4db9-8295-cc11d1d657e7","Type":"ContainerStarted","Data":"3b1d8125b4b8bcaf7f77bd14190bf9f2c48d717b6d121e3db1193fc4cbea3459"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.286655 5010 generic.go:334] "Generic (PLEG): container finished" podID="bb0e3931-24cf-4410-98c7-74cba52c93ae" containerID="1c95b91556706bc0e785fef8d7a8aa8557546e4cb115de25ca1349b5cc34538f" exitCode=0 Nov 26 15:50:14 crc 
kubenswrapper[5010]: I1126 15:50:14.286696 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" event={"ID":"bb0e3931-24cf-4410-98c7-74cba52c93ae","Type":"ContainerDied","Data":"1c95b91556706bc0e785fef8d7a8aa8557546e4cb115de25ca1349b5cc34538f"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.290716 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerStarted","Data":"1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.290939 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-central-agent" containerID="cri-o://8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0" gracePeriod=30 Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.291293 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.291351 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="proxy-httpd" containerID="cri-o://1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4" gracePeriod=30 Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.291408 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="sg-core" containerID="cri-o://89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9" gracePeriod=30 Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.291487 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-notification-agent" containerID="cri-o://d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f" gracePeriod=30 Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.307898 5010 generic.go:334] "Generic (PLEG): container finished" podID="fc74f571-aa5a-4030-800c-2945c869fdd5" containerID="bd5dcd159d98522a1fa453a1295f760782d174209dffc2ff416e800b67e207f6" exitCode=0 Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.307988 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fj5jk" event={"ID":"fc74f571-aa5a-4030-800c-2945c869fdd5","Type":"ContainerDied","Data":"bd5dcd159d98522a1fa453a1295f760782d174209dffc2ff416e800b67e207f6"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.322146 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" event={"ID":"0756834c-f4fd-4aaa-b3c9-d00fad779b4b","Type":"ContainerStarted","Data":"0f45d7eea2abefc4913d1a7e50d9d1584398760133b14b1ec28110f9bc2a3322"} Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.339963 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-knxrp" podStartSLOduration=3.33994685 podStartE2EDuration="3.33994685s" podCreationTimestamp="2025-11-26 15:50:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:50:14.335185701 +0000 UTC m=+1435.125902859" 
watchObservedRunningTime="2025-11-26 15:50:14.33994685 +0000 UTC m=+1435.130663998" Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.374503 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.7151549790000002 podStartE2EDuration="17.374486654s" podCreationTimestamp="2025-11-26 15:49:57 +0000 UTC" firstStartedPulling="2025-11-26 15:49:58.916338922 +0000 UTC m=+1419.707056070" lastFinishedPulling="2025-11-26 15:50:12.575670597 +0000 UTC m=+1433.366387745" observedRunningTime="2025-11-26 15:50:14.356922605 +0000 UTC m=+1435.147639753" watchObservedRunningTime="2025-11-26 15:50:14.374486654 +0000 UTC m=+1435.165203812" Nov 26 15:50:14 crc kubenswrapper[5010]: I1126 15:50:14.390232 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" podStartSLOduration=3.390212687 podStartE2EDuration="3.390212687s" podCreationTimestamp="2025-11-26 15:50:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:50:14.372149896 +0000 UTC m=+1435.162867044" watchObservedRunningTime="2025-11-26 15:50:14.390212687 +0000 UTC m=+1435.180929835" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.002954 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.148421 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjf7s\" (UniqueName: \"kubernetes.io/projected/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-kube-api-access-fjf7s\") pod \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.148625 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-operator-scripts\") pod \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\" (UID: \"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4\") " Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.148992 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" (UID: "7863fcf1-2cbd-44d2-8db8-bb9c896f70c4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.149166 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.161925 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-kube-api-access-fjf7s" (OuterVolumeSpecName: "kube-api-access-fjf7s") pod "7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" (UID: "7863fcf1-2cbd-44d2-8db8-bb9c896f70c4"). InnerVolumeSpecName "kube-api-access-fjf7s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.250703 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjf7s\" (UniqueName: \"kubernetes.io/projected/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4-kube-api-access-fjf7s\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.331748 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-q5wnb" event={"ID":"7863fcf1-2cbd-44d2-8db8-bb9c896f70c4","Type":"ContainerDied","Data":"7be3a3edcecce432e365452a952e555017f0dcc1a3e5effc74dc2fc3d0375d5d"} Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.331785 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-q5wnb" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.331790 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7be3a3edcecce432e365452a952e555017f0dcc1a3e5effc74dc2fc3d0375d5d" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.332829 5010 generic.go:334] "Generic (PLEG): container finished" podID="7432b1a0-ae0e-4db9-8295-cc11d1d657e7" containerID="9e380480e0ed87dc131e2bebbe255dfbe9ddfc2684fa87e255d79aa89c5c350c" exitCode=0 Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.332871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-knxrp" event={"ID":"7432b1a0-ae0e-4db9-8295-cc11d1d657e7","Type":"ContainerDied","Data":"9e380480e0ed87dc131e2bebbe255dfbe9ddfc2684fa87e255d79aa89c5c350c"} Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.341112 5010 generic.go:334] "Generic (PLEG): container finished" podID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerID="1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4" exitCode=0 Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.341141 5010 generic.go:334] "Generic (PLEG): container finished" podID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerID="89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9" exitCode=2 Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.341560 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerDied","Data":"1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4"} Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.341632 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerDied","Data":"89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9"} Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.344694 5010 generic.go:334] "Generic (PLEG): container finished" podID="0756834c-f4fd-4aaa-b3c9-d00fad779b4b" containerID="0f45d7eea2abefc4913d1a7e50d9d1584398760133b14b1ec28110f9bc2a3322" exitCode=0 Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.344747 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" event={"ID":"0756834c-f4fd-4aaa-b3c9-d00fad779b4b","Type":"ContainerDied","Data":"0f45d7eea2abefc4913d1a7e50d9d1584398760133b14b1ec28110f9bc2a3322"} Nov 26 15:50:15 crc kubenswrapper[5010]: E1126 15:50:15.367278 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7863fcf1_2cbd_44d2_8db8_bb9c896f70c4.slice\": RecentStats: unable to find data in memory cache]" Nov 26 15:50:15 crc kubenswrapper[5010]: I1126 15:50:15.879286 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.061050 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.069450 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnvrg\" (UniqueName: \"kubernetes.io/projected/7f3aae36-d899-446c-9cf0-9ee7c7218c98-kube-api-access-tnvrg\") pod \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.069560 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f3aae36-d899-446c-9cf0-9ee7c7218c98-operator-scripts\") pod \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\" (UID: \"7f3aae36-d899-446c-9cf0-9ee7c7218c98\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.070188 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f3aae36-d899-446c-9cf0-9ee7c7218c98-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7f3aae36-d899-446c-9cf0-9ee7c7218c98" (UID: "7f3aae36-d899-446c-9cf0-9ee7c7218c98"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.071871 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7f3aae36-d899-446c-9cf0-9ee7c7218c98-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.086787 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f3aae36-d899-446c-9cf0-9ee7c7218c98-kube-api-access-tnvrg" (OuterVolumeSpecName: "kube-api-access-tnvrg") pod "7f3aae36-d899-446c-9cf0-9ee7c7218c98" (UID: "7f3aae36-d899-446c-9cf0-9ee7c7218c98"). InnerVolumeSpecName "kube-api-access-tnvrg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.133191 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.172862 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc74f571-aa5a-4030-800c-2945c869fdd5-operator-scripts\") pod \"fc74f571-aa5a-4030-800c-2945c869fdd5\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.173017 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xf5r7\" (UniqueName: \"kubernetes.io/projected/fc74f571-aa5a-4030-800c-2945c869fdd5-kube-api-access-xf5r7\") pod \"fc74f571-aa5a-4030-800c-2945c869fdd5\" (UID: \"fc74f571-aa5a-4030-800c-2945c869fdd5\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.173562 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnvrg\" (UniqueName: \"kubernetes.io/projected/7f3aae36-d899-446c-9cf0-9ee7c7218c98-kube-api-access-tnvrg\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.174097 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc74f571-aa5a-4030-800c-2945c869fdd5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fc74f571-aa5a-4030-800c-2945c869fdd5" (UID: "fc74f571-aa5a-4030-800c-2945c869fdd5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.191027 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc74f571-aa5a-4030-800c-2945c869fdd5-kube-api-access-xf5r7" (OuterVolumeSpecName: "kube-api-access-xf5r7") pod "fc74f571-aa5a-4030-800c-2945c869fdd5" (UID: "fc74f571-aa5a-4030-800c-2945c869fdd5"). InnerVolumeSpecName "kube-api-access-xf5r7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.275698 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb0e3931-24cf-4410-98c7-74cba52c93ae-operator-scripts\") pod \"bb0e3931-24cf-4410-98c7-74cba52c93ae\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.275819 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8zg8q\" (UniqueName: \"kubernetes.io/projected/bb0e3931-24cf-4410-98c7-74cba52c93ae-kube-api-access-8zg8q\") pod \"bb0e3931-24cf-4410-98c7-74cba52c93ae\" (UID: \"bb0e3931-24cf-4410-98c7-74cba52c93ae\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.276513 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bb0e3931-24cf-4410-98c7-74cba52c93ae-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bb0e3931-24cf-4410-98c7-74cba52c93ae" (UID: "bb0e3931-24cf-4410-98c7-74cba52c93ae"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.276561 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fc74f571-aa5a-4030-800c-2945c869fdd5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.276584 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xf5r7\" (UniqueName: \"kubernetes.io/projected/fc74f571-aa5a-4030-800c-2945c869fdd5-kube-api-access-xf5r7\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.279217 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb0e3931-24cf-4410-98c7-74cba52c93ae-kube-api-access-8zg8q" (OuterVolumeSpecName: "kube-api-access-8zg8q") pod "bb0e3931-24cf-4410-98c7-74cba52c93ae" (UID: "bb0e3931-24cf-4410-98c7-74cba52c93ae"). InnerVolumeSpecName "kube-api-access-8zg8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.355128 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" event={"ID":"bb0e3931-24cf-4410-98c7-74cba52c93ae","Type":"ContainerDied","Data":"c2fe5488c3566ce92fdc5fe705cbea60fac86363cd0ca37f793309aa904cac08"} Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.355176 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2fe5488c3566ce92fdc5fe705cbea60fac86363cd0ca37f793309aa904cac08" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.355184 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-e911-account-create-update-x6wmv" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.357896 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-fj5jk" event={"ID":"fc74f571-aa5a-4030-800c-2945c869fdd5","Type":"ContainerDied","Data":"3d0f41a13c8006627cdd60fb0be16f7df4eae262b840dd3d04b59d430c736ebd"} Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.357937 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d0f41a13c8006627cdd60fb0be16f7df4eae262b840dd3d04b59d430c736ebd" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.358257 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-fj5jk" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.361036 5010 generic.go:334] "Generic (PLEG): container finished" podID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerID="d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f" exitCode=0 Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.361088 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerDied","Data":"d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f"} Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.362851 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f128-account-create-update-xklvh" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.362863 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f128-account-create-update-xklvh" event={"ID":"7f3aae36-d899-446c-9cf0-9ee7c7218c98","Type":"ContainerDied","Data":"ff9af31005160b79dc732a86de3fd2a28d6884297c416262f3e5bf759795690f"} Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.362914 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff9af31005160b79dc732a86de3fd2a28d6884297c416262f3e5bf759795690f" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.378591 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bb0e3931-24cf-4410-98c7-74cba52c93ae-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.378885 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8zg8q\" (UniqueName: \"kubernetes.io/projected/bb0e3931-24cf-4410-98c7-74cba52c93ae-kube-api-access-8zg8q\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.665197 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.665834 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-log" containerID="cri-o://79c2fb1f54ff9a95cc4dcaa6cb5962ca09af991addf2ded2b30b24b2b7bbdfbe" gracePeriod=30 Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.666332 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-httpd" containerID="cri-o://f5c52d88f44b865b5096b50805d5e2f59cef8516541529d9fdf1ac840da5d9c1" gracePeriod=30 Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.762168 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.865396 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.889482 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-operator-scripts\") pod \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.889556 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7s2g\" (UniqueName: \"kubernetes.io/projected/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-kube-api-access-t7s2g\") pod \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\" (UID: \"0756834c-f4fd-4aaa-b3c9-d00fad779b4b\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.891203 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0756834c-f4fd-4aaa-b3c9-d00fad779b4b" (UID: "0756834c-f4fd-4aaa-b3c9-d00fad779b4b"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.896192 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-kube-api-access-t7s2g" (OuterVolumeSpecName: "kube-api-access-t7s2g") pod "0756834c-f4fd-4aaa-b3c9-d00fad779b4b" (UID: "0756834c-f4fd-4aaa-b3c9-d00fad779b4b"). InnerVolumeSpecName "kube-api-access-t7s2g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.991038 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-operator-scripts\") pod \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.991196 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mfb22\" (UniqueName: \"kubernetes.io/projected/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-kube-api-access-mfb22\") pod \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\" (UID: \"7432b1a0-ae0e-4db9-8295-cc11d1d657e7\") " Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.991854 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7432b1a0-ae0e-4db9-8295-cc11d1d657e7" (UID: "7432b1a0-ae0e-4db9-8295-cc11d1d657e7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.991934 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.991963 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7s2g\" (UniqueName: \"kubernetes.io/projected/0756834c-f4fd-4aaa-b3c9-d00fad779b4b-kube-api-access-t7s2g\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:16 crc kubenswrapper[5010]: I1126 15:50:16.995997 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-kube-api-access-mfb22" (OuterVolumeSpecName: "kube-api-access-mfb22") pod "7432b1a0-ae0e-4db9-8295-cc11d1d657e7" (UID: "7432b1a0-ae0e-4db9-8295-cc11d1d657e7"). InnerVolumeSpecName "kube-api-access-mfb22". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.093242 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.093278 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mfb22\" (UniqueName: \"kubernetes.io/projected/7432b1a0-ae0e-4db9-8295-cc11d1d657e7-kube-api-access-mfb22\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.375716 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-knxrp" Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.375768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-knxrp" event={"ID":"7432b1a0-ae0e-4db9-8295-cc11d1d657e7","Type":"ContainerDied","Data":"3b1d8125b4b8bcaf7f77bd14190bf9f2c48d717b6d121e3db1193fc4cbea3459"} Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.375828 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b1d8125b4b8bcaf7f77bd14190bf9f2c48d717b6d121e3db1193fc4cbea3459" Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.378471 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerID="79c2fb1f54ff9a95cc4dcaa6cb5962ca09af991addf2ded2b30b24b2b7bbdfbe" exitCode=143 Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.378561 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4496d49-7b88-4c60-9fd5-fe0608f52b13","Type":"ContainerDied","Data":"79c2fb1f54ff9a95cc4dcaa6cb5962ca09af991addf2ded2b30b24b2b7bbdfbe"} Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.381722 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" event={"ID":"0756834c-f4fd-4aaa-b3c9-d00fad779b4b","Type":"ContainerDied","Data":"239963e232d3eaf4e90251dbfb7afec15f405ce4495388a6207e35c59a032211"} Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.381810 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="239963e232d3eaf4e90251dbfb7afec15f405ce4495388a6207e35c59a032211" Nov 26 15:50:17 crc kubenswrapper[5010]: I1126 15:50:17.381892 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b07e-account-create-update-wpwwt" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.087369 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.217221 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-scripts\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.217273 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-run-httpd\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.217360 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-sg-core-conf-yaml\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.217462 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7v4q\" (UniqueName: \"kubernetes.io/projected/eb76651e-5f57-4dee-904f-416ebe4c8306-kube-api-access-c7v4q\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.217542 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-log-httpd\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.217647 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-config-data\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.218026 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.218088 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-combined-ca-bundle\") pod \"eb76651e-5f57-4dee-904f-416ebe4c8306\" (UID: \"eb76651e-5f57-4dee-904f-416ebe4c8306\") " Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.218490 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.219068 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.219091 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/eb76651e-5f57-4dee-904f-416ebe4c8306-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.222977 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb76651e-5f57-4dee-904f-416ebe4c8306-kube-api-access-c7v4q" (OuterVolumeSpecName: "kube-api-access-c7v4q") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "kube-api-access-c7v4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.229027 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-scripts" (OuterVolumeSpecName: "scripts") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.253822 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.321263 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7v4q\" (UniqueName: \"kubernetes.io/projected/eb76651e-5f57-4dee-904f-416ebe4c8306-kube-api-access-c7v4q\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.321326 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.321339 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.335532 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.361330 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-config-data" (OuterVolumeSpecName: "config-data") pod "eb76651e-5f57-4dee-904f-416ebe4c8306" (UID: "eb76651e-5f57-4dee-904f-416ebe4c8306"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.393646 5010 generic.go:334] "Generic (PLEG): container finished" podID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerID="8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0" exitCode=0 Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.393751 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.394478 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerDied","Data":"8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0"} Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.394582 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"eb76651e-5f57-4dee-904f-416ebe4c8306","Type":"ContainerDied","Data":"6d5e809396628aad71d305907db302423eeb6123f255cc88b79ea807aae00823"} Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.394647 5010 scope.go:117] "RemoveContainer" containerID="1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.424652 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.424955 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb76651e-5f57-4dee-904f-416ebe4c8306-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.429677 5010 scope.go:117] "RemoveContainer" containerID="89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.446542 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.454084 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.455948 5010 scope.go:117] "RemoveContainer" containerID="d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.474927 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475387 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="proxy-httpd" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475407 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="proxy-httpd" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475437 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb0e3931-24cf-4410-98c7-74cba52c93ae" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475445 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb0e3931-24cf-4410-98c7-74cba52c93ae" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475455 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="7432b1a0-ae0e-4db9-8295-cc11d1d657e7" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475461 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7432b1a0-ae0e-4db9-8295-cc11d1d657e7" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475471 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475476 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475507 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-central-agent" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475513 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-central-agent" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475526 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0756834c-f4fd-4aaa-b3c9-d00fad779b4b" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475533 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0756834c-f4fd-4aaa-b3c9-d00fad779b4b" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475554 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-notification-agent" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475560 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-notification-agent" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475573 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc74f571-aa5a-4030-800c-2945c869fdd5" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475579 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc74f571-aa5a-4030-800c-2945c869fdd5" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475593 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="sg-core" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475600 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="sg-core" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.475610 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f3aae36-d899-446c-9cf0-9ee7c7218c98" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475616 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f3aae36-d899-446c-9cf0-9ee7c7218c98" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475799 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc74f571-aa5a-4030-800c-2945c869fdd5" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475814 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="7432b1a0-ae0e-4db9-8295-cc11d1d657e7" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475822 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" containerName="mariadb-database-create" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475834 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb0e3931-24cf-4410-98c7-74cba52c93ae" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475853 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="proxy-httpd" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475862 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-central-agent" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475870 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="sg-core" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475879 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" containerName="ceilometer-notification-agent" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475886 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0756834c-f4fd-4aaa-b3c9-d00fad779b4b" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.475896 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f3aae36-d899-446c-9cf0-9ee7c7218c98" containerName="mariadb-account-create-update" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.477903 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.479879 5010 scope.go:117] "RemoveContainer" containerID="8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.481423 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.481935 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.495224 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.520208 5010 scope.go:117] "RemoveContainer" containerID="1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.521017 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4\": container with ID starting with 1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4 not found: ID does not exist" containerID="1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.521054 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4"} err="failed to get container status \"1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4\": rpc error: code = NotFound desc = could not find container \"1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4\": container with ID starting with 1f1ae6cd8669486e198e3ee5a759c1fd146b72a1eb09f696c1ef91169c885dc4 not found: ID does not exist" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.521082 5010 scope.go:117] "RemoveContainer" containerID="89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.521555 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9\": container with ID starting with 89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9 not found: ID does not exist" containerID="89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.521594 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9"} err="failed to get container status \"89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9\": rpc error: code = NotFound desc = could not find container \"89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9\": container with ID starting with 89f301379d5b428ac1354fbb727946c775dc3b57562a0aeaabfa4f93c30f82e9 not found: ID does not exist" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.521620 5010 scope.go:117] "RemoveContainer" containerID="d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.521939 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f\": container with ID starting with d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f not found: ID does not exist" containerID="d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.521989 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f"} err="failed to get container status \"d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f\": rpc error: code = NotFound desc = could not find container \"d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f\": container with ID starting with d48cae3fadf987d682ecd7e086c9aba90141205773ee055ed33dffb637d9ab2f not found: ID does not exist" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.522010 5010 scope.go:117] "RemoveContainer" containerID="8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0" Nov 26 15:50:18 crc kubenswrapper[5010]: E1126 15:50:18.522283 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0\": container with ID starting with 8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0 not found: ID does not exist" containerID="8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.522319 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0"} err="failed to get container status \"8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0\": rpc error: code = NotFound desc = could not find container \"8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0\": container with ID starting with 8216110789ced313f2de31959918a8ce7b617db7da730789f83fee6f676905e0 not found: ID does not exist" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.628688 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqkmx\" (UniqueName: \"kubernetes.io/projected/c5bee0a3-2c20-4dae-abb1-801e9d53485b-kube-api-access-gqkmx\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.628862 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-log-httpd\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.628882 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-scripts\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.629035 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-config-data\") pod \"ceilometer-0\" (UID: 
\"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.629083 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.629264 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-run-httpd\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.629540 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.731843 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.731919 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqkmx\" (UniqueName: \"kubernetes.io/projected/c5bee0a3-2c20-4dae-abb1-801e9d53485b-kube-api-access-gqkmx\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.732032 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-log-httpd\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.732060 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-scripts\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.732117 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-config-data\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.732141 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.732207 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-run-httpd\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.732681 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-run-httpd\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.733018 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-log-httpd\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.738035 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-config-data\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.738265 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.739145 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-scripts\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.740304 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.749985 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqkmx\" (UniqueName: \"kubernetes.io/projected/c5bee0a3-2c20-4dae-abb1-801e9d53485b-kube-api-access-gqkmx\") pod \"ceilometer-0\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " pod="openstack/ceilometer-0" Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.773186 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.773478 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-log" containerID="cri-o://827c329ecf0f42138145a53ff8287287931b6a87e623afc3834445f7de0125e6" gracePeriod=30 Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.773524 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-httpd" containerID="cri-o://e873d5eecdcfbd7db00f4e712168ecab4bbdea7c34c56fa735dedfc748d1b292" gracePeriod=30 Nov 26 15:50:18 crc kubenswrapper[5010]: I1126 15:50:18.814852 5010 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:19 crc kubenswrapper[5010]: I1126 15:50:19.206250 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:19 crc kubenswrapper[5010]: I1126 15:50:19.285617 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:19 crc kubenswrapper[5010]: W1126 15:50:19.291937 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc5bee0a3_2c20_4dae_abb1_801e9d53485b.slice/crio-8d9201b141bfe0059883d28721b8f01e57028edf690ef63fdaaef9fb7aec45cc WatchSource:0}: Error finding container 8d9201b141bfe0059883d28721b8f01e57028edf690ef63fdaaef9fb7aec45cc: Status 404 returned error can't find the container with id 8d9201b141bfe0059883d28721b8f01e57028edf690ef63fdaaef9fb7aec45cc Nov 26 15:50:19 crc kubenswrapper[5010]: I1126 15:50:19.403738 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerStarted","Data":"8d9201b141bfe0059883d28721b8f01e57028edf690ef63fdaaef9fb7aec45cc"} Nov 26 15:50:19 crc kubenswrapper[5010]: I1126 15:50:19.407386 5010 generic.go:334] "Generic (PLEG): container finished" podID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerID="827c329ecf0f42138145a53ff8287287931b6a87e623afc3834445f7de0125e6" exitCode=143 Nov 26 15:50:19 crc kubenswrapper[5010]: I1126 15:50:19.407429 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"13084c10-bc6a-48a7-8624-a405f5d06e3d","Type":"ContainerDied","Data":"827c329ecf0f42138145a53ff8287287931b6a87e623afc3834445f7de0125e6"} Nov 26 15:50:19 crc kubenswrapper[5010]: I1126 15:50:19.906202 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb76651e-5f57-4dee-904f-416ebe4c8306" path="/var/lib/kubelet/pods/eb76651e-5f57-4dee-904f-416ebe4c8306/volumes" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.354470 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.446807 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerID="f5c52d88f44b865b5096b50805d5e2f59cef8516541529d9fdf1ac840da5d9c1" exitCode=0 Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.446879 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4496d49-7b88-4c60-9fd5-fe0608f52b13","Type":"ContainerDied","Data":"f5c52d88f44b865b5096b50805d5e2f59cef8516541529d9fdf1ac840da5d9c1"} Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.446914 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"b4496d49-7b88-4c60-9fd5-fe0608f52b13","Type":"ContainerDied","Data":"b529a7981f6f67cb8a08573e1393b0118bc3ca94b7b7eb6bc05ba8c121157c90"} Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.446928 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b529a7981f6f67cb8a08573e1393b0118bc3ca94b7b7eb6bc05ba8c121157c90" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.449851 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.451509 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerStarted","Data":"5b92333b7dee230fd4797705b1bf7027b14c9a0b76579f966163f5850f0e52a8"} Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569620 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-combined-ca-bundle\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569686 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569740 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7mqk\" (UniqueName: \"kubernetes.io/projected/b4496d49-7b88-4c60-9fd5-fe0608f52b13-kube-api-access-z7mqk\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569809 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-logs\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569848 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-httpd-run\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569889 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-config-data\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569941 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-scripts\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.569997 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-public-tls-certs\") pod \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\" (UID: \"b4496d49-7b88-4c60-9fd5-fe0608f52b13\") " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.570285 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.570662 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.570820 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-logs" (OuterVolumeSpecName: "logs") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.576632 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4496d49-7b88-4c60-9fd5-fe0608f52b13-kube-api-access-z7mqk" (OuterVolumeSpecName: "kube-api-access-z7mqk") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "kube-api-access-z7mqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.600008 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.605629 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-scripts" (OuterVolumeSpecName: "scripts") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.638772 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.648853 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.672157 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.672193 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.672202 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.672231 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.672241 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7mqk\" (UniqueName: \"kubernetes.io/projected/b4496d49-7b88-4c60-9fd5-fe0608f52b13-kube-api-access-z7mqk\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.672250 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4496d49-7b88-4c60-9fd5-fe0608f52b13-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.673756 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-config-data" (OuterVolumeSpecName: "config-data") pod "b4496d49-7b88-4c60-9fd5-fe0608f52b13" (UID: "b4496d49-7b88-4c60-9fd5-fe0608f52b13"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.710587 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.774088 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:20 crc kubenswrapper[5010]: I1126 15:50:20.774130 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4496d49-7b88-4c60-9fd5-fe0608f52b13-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.461753 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.461997 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerStarted","Data":"5bf6f781171d797dbea180acce8224422b88df9e8beb994b88dc12e1fb509af1"} Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.503762 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.539962 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.549011 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:50:21 crc kubenswrapper[5010]: E1126 15:50:21.549423 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-log" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.549440 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-log" Nov 26 15:50:21 crc kubenswrapper[5010]: E1126 15:50:21.549466 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-httpd" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.549474 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-httpd" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.549701 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-log" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.549752 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" containerName="glance-httpd" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.597169 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.614809 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.616228 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.643779 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.701046 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.701369 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.701472 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnfsv\" (UniqueName: \"kubernetes.io/projected/37d52190-a61c-44fb-9c9c-7966bd00e2c8-kube-api-access-tnfsv\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.701673 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-logs\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.701833 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.701982 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.702111 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.702224 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-config-data\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.803961 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-logs\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804048 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804096 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804120 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804151 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-config-data\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804191 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804225 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.804243 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnfsv\" (UniqueName: \"kubernetes.io/projected/37d52190-a61c-44fb-9c9c-7966bd00e2c8-kube-api-access-tnfsv\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.805010 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-logs\") 
pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.805774 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.805938 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.811494 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.813140 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.815140 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-config-data\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.819554 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.829096 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnfsv\" (UniqueName: \"kubernetes.io/projected/37d52190-a61c-44fb-9c9c-7966bd00e2c8-kube-api-access-tnfsv\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.843581 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"glance-default-external-api-0\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " pod="openstack/glance-default-external-api-0" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.905179 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4496d49-7b88-4c60-9fd5-fe0608f52b13" path="/var/lib/kubelet/pods/b4496d49-7b88-4c60-9fd5-fe0608f52b13/volumes" Nov 26 15:50:21 crc kubenswrapper[5010]: I1126 15:50:21.955539 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.217909 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-56krg"] Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.221568 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.225111 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mv8bz" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.225323 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.225364 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.244909 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-56krg"] Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.314336 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt79b\" (UniqueName: \"kubernetes.io/projected/731566ac-0993-4f7d-a4ad-9fadd9beee04-kube-api-access-kt79b\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.314618 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.314675 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-config-data\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.314808 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-scripts\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.417162 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt79b\" (UniqueName: \"kubernetes.io/projected/731566ac-0993-4f7d-a4ad-9fadd9beee04-kube-api-access-kt79b\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.417319 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: 
\"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.417349 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-config-data\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.417388 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-scripts\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.424503 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-config-data\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.424787 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.432218 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-scripts\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.435998 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt79b\" (UniqueName: \"kubernetes.io/projected/731566ac-0993-4f7d-a4ad-9fadd9beee04-kube-api-access-kt79b\") pod \"nova-cell0-conductor-db-sync-56krg\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.478954 5010 generic.go:334] "Generic (PLEG): container finished" podID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerID="e873d5eecdcfbd7db00f4e712168ecab4bbdea7c34c56fa735dedfc748d1b292" exitCode=0 Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.479050 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"13084c10-bc6a-48a7-8624-a405f5d06e3d","Type":"ContainerDied","Data":"e873d5eecdcfbd7db00f4e712168ecab4bbdea7c34c56fa735dedfc748d1b292"} Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.504218 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerStarted","Data":"a8e4d225fcd4aaa77b55dfdf3eb2aa67e12d6774178dd7bea3bf6aff0a1f0002"} Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.568925 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.632957 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.693541 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722216 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-combined-ca-bundle\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722544 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-logs\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722602 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-internal-tls-certs\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722657 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722685 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4trk6\" (UniqueName: \"kubernetes.io/projected/13084c10-bc6a-48a7-8624-a405f5d06e3d-kube-api-access-4trk6\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722740 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-config-data\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722894 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-scripts\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.722931 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-httpd-run\") pod \"13084c10-bc6a-48a7-8624-a405f5d06e3d\" (UID: \"13084c10-bc6a-48a7-8624-a405f5d06e3d\") " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.728126 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). 
InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.728215 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.728569 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-logs" (OuterVolumeSpecName: "logs") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.736450 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-scripts" (OuterVolumeSpecName: "scripts") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.739640 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13084c10-bc6a-48a7-8624-a405f5d06e3d-kube-api-access-4trk6" (OuterVolumeSpecName: "kube-api-access-4trk6") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "kube-api-access-4trk6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.767571 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.800926 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.802179 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-config-data" (OuterVolumeSpecName: "config-data") pod "13084c10-bc6a-48a7-8624-a405f5d06e3d" (UID: "13084c10-bc6a-48a7-8624-a405f5d06e3d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828681 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828744 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4trk6\" (UniqueName: \"kubernetes.io/projected/13084c10-bc6a-48a7-8624-a405f5d06e3d-kube-api-access-4trk6\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828762 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828775 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828785 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828800 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828810 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/13084c10-bc6a-48a7-8624-a405f5d06e3d-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.828820 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/13084c10-bc6a-48a7-8624-a405f5d06e3d-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.864746 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.869960 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.931118 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.942626 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-747455655b-ldrpd"] Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.942860 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-747455655b-ldrpd" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-api" containerID="cri-o://9b495458e612128334395409d752e36730b0d6b39ff6af3ed4daa774634efdd7" gracePeriod=30 Nov 26 15:50:22 crc kubenswrapper[5010]: I1126 15:50:22.943203 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-747455655b-ldrpd" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" 
containerName="neutron-httpd" containerID="cri-o://85ff0b0e3b7dd434e6128d567eeeeec11fd8ac7b2d055a682903e7034e63280c" gracePeriod=30 Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.141662 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-56krg"] Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.555675 5010 generic.go:334] "Generic (PLEG): container finished" podID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerID="85ff0b0e3b7dd434e6128d567eeeeec11fd8ac7b2d055a682903e7034e63280c" exitCode=0 Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.555787 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-747455655b-ldrpd" event={"ID":"3c9208c7-3716-48e8-9679-c1bb140259eb","Type":"ContainerDied","Data":"85ff0b0e3b7dd434e6128d567eeeeec11fd8ac7b2d055a682903e7034e63280c"} Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.573099 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-56krg" event={"ID":"731566ac-0993-4f7d-a4ad-9fadd9beee04","Type":"ContainerStarted","Data":"5c226fa9f633dd1e9282e5c5d6aefa260789d05712433a64a7b83858fb47cc85"} Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.593775 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"13084c10-bc6a-48a7-8624-a405f5d06e3d","Type":"ContainerDied","Data":"5296c4177f4329f3dce5f739eaeaff11a945ce94eea38f794ad51f8670aecf3b"} Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.593827 5010 scope.go:117] "RemoveContainer" containerID="e873d5eecdcfbd7db00f4e712168ecab4bbdea7c34c56fa735dedfc748d1b292" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.593966 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.622142 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37d52190-a61c-44fb-9c9c-7966bd00e2c8","Type":"ContainerStarted","Data":"1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045"} Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.622183 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37d52190-a61c-44fb-9c9c-7966bd00e2c8","Type":"ContainerStarted","Data":"f6e18794f4e63dd4f476d10515df3ff32d55d7ea4d051925cb43bda9cf16489e"} Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.653637 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.678509 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.687206 5010 scope.go:117] "RemoveContainer" containerID="827c329ecf0f42138145a53ff8287287931b6a87e623afc3834445f7de0125e6" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.725387 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:50:23 crc kubenswrapper[5010]: E1126 15:50:23.725832 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-log" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.725849 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-log" Nov 26 15:50:23 crc kubenswrapper[5010]: E1126 15:50:23.725872 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-httpd" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.725879 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-httpd" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.726074 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-httpd" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.726096 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" containerName="glance-log" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.727181 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.732046 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.732272 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 15:50:23 crc kubenswrapper[5010]: I1126 15:50:23.746038 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.863352 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.863894 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.863928 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-logs\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.863996 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.864216 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.864264 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tzpc\" (UniqueName: \"kubernetes.io/projected/fe931cd2-6e31-4e82-a617-f028019a60c4-kube-api-access-2tzpc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.864330 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.867834 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969592 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969672 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969693 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tzpc\" (UniqueName: \"kubernetes.io/projected/fe931cd2-6e31-4e82-a617-f028019a60c4-kube-api-access-2tzpc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969754 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969784 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969808 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969865 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.969890 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-logs\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.970285 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-logs\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.975818 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.978436 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:23.978985 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.001876 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.005039 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.005436 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.007606 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tzpc\" (UniqueName: \"kubernetes.io/projected/fe931cd2-6e31-4e82-a617-f028019a60c4-kube-api-access-2tzpc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.014170 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.063786 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.085701 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13084c10-bc6a-48a7-8624-a405f5d06e3d" path="/var/lib/kubelet/pods/13084c10-bc6a-48a7-8624-a405f5d06e3d/volumes" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.655678 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37d52190-a61c-44fb-9c9c-7966bd00e2c8","Type":"ContainerStarted","Data":"9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b"} Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.665127 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerStarted","Data":"62ec60b700d8475ab38441d18c9c27bab1124087d3c0005ce5a8fcd867f35b22"} Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.665298 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-central-agent" containerID="cri-o://5b92333b7dee230fd4797705b1bf7027b14c9a0b76579f966163f5850f0e52a8" gracePeriod=30 Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.665642 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.665695 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="proxy-httpd" containerID="cri-o://62ec60b700d8475ab38441d18c9c27bab1124087d3c0005ce5a8fcd867f35b22" gracePeriod=30 Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.665782 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="sg-core" containerID="cri-o://a8e4d225fcd4aaa77b55dfdf3eb2aa67e12d6774178dd7bea3bf6aff0a1f0002" gracePeriod=30 Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.665830 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-notification-agent" containerID="cri-o://5bf6f781171d797dbea180acce8224422b88df9e8beb994b88dc12e1fb509af1" gracePeriod=30 Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.722523 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.880564107 podStartE2EDuration="6.722505218s" podCreationTimestamp="2025-11-26 15:50:18 +0000 UTC" firstStartedPulling="2025-11-26 15:50:19.293938563 +0000 UTC m=+1440.084655721" lastFinishedPulling="2025-11-26 15:50:24.135879684 +0000 UTC m=+1444.926596832" observedRunningTime="2025-11-26 15:50:24.714080598 +0000 UTC m=+1445.504797746" watchObservedRunningTime="2025-11-26 15:50:24.722505218 +0000 UTC m=+1445.513222366" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.722853 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.722848597 podStartE2EDuration="3.722848597s" podCreationTimestamp="2025-11-26 15:50:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 
15:50:24.691883102 +0000 UTC m=+1445.482600250" watchObservedRunningTime="2025-11-26 15:50:24.722848597 +0000 UTC m=+1445.513565745" Nov 26 15:50:24 crc kubenswrapper[5010]: I1126 15:50:24.777533 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.688375 5010 generic.go:334] "Generic (PLEG): container finished" podID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerID="62ec60b700d8475ab38441d18c9c27bab1124087d3c0005ce5a8fcd867f35b22" exitCode=0 Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.688638 5010 generic.go:334] "Generic (PLEG): container finished" podID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerID="a8e4d225fcd4aaa77b55dfdf3eb2aa67e12d6774178dd7bea3bf6aff0a1f0002" exitCode=2 Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.688646 5010 generic.go:334] "Generic (PLEG): container finished" podID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerID="5bf6f781171d797dbea180acce8224422b88df9e8beb994b88dc12e1fb509af1" exitCode=0 Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.688451 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerDied","Data":"62ec60b700d8475ab38441d18c9c27bab1124087d3c0005ce5a8fcd867f35b22"} Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.688730 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerDied","Data":"a8e4d225fcd4aaa77b55dfdf3eb2aa67e12d6774178dd7bea3bf6aff0a1f0002"} Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.688750 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerDied","Data":"5bf6f781171d797dbea180acce8224422b88df9e8beb994b88dc12e1fb509af1"} Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.693318 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe931cd2-6e31-4e82-a617-f028019a60c4","Type":"ContainerStarted","Data":"547cc5858c244164a45ddd7c0b27e3033da3950ba49796f26212abc8845b9246"} Nov 26 15:50:25 crc kubenswrapper[5010]: I1126 15:50:25.693481 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe931cd2-6e31-4e82-a617-f028019a60c4","Type":"ContainerStarted","Data":"1d09fefea6425292ffe509ee1e2b6e0ab4205ea5dd4513f4c904ddea49391dc6"} Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.714256 5010 generic.go:334] "Generic (PLEG): container finished" podID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerID="9b495458e612128334395409d752e36730b0d6b39ff6af3ed4daa774634efdd7" exitCode=0 Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.714775 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-747455655b-ldrpd" event={"ID":"3c9208c7-3716-48e8-9679-c1bb140259eb","Type":"ContainerDied","Data":"9b495458e612128334395409d752e36730b0d6b39ff6af3ed4daa774634efdd7"} Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.714807 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-747455655b-ldrpd" event={"ID":"3c9208c7-3716-48e8-9679-c1bb140259eb","Type":"ContainerDied","Data":"c90d59a1cdafcc8abd8f10fe7bd3ef51157602680a2751352c01c73b1a9a8d15"} Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.714819 5010 pod_container_deletor.go:80] 
"Container not found in pod's containers" containerID="c90d59a1cdafcc8abd8f10fe7bd3ef51157602680a2751352c01c73b1a9a8d15" Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.716980 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe931cd2-6e31-4e82-a617-f028019a60c4","Type":"ContainerStarted","Data":"e14aef587918296a922d16942a038b94eb34c104faed82cb3cae2790e3e19fba"} Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.754421 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.7544019950000003 podStartE2EDuration="3.754401995s" podCreationTimestamp="2025-11-26 15:50:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:50:26.743265277 +0000 UTC m=+1447.533982465" watchObservedRunningTime="2025-11-26 15:50:26.754401995 +0000 UTC m=+1447.545119143" Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.762405 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.947028 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l28j5\" (UniqueName: \"kubernetes.io/projected/3c9208c7-3716-48e8-9679-c1bb140259eb-kube-api-access-l28j5\") pod \"3c9208c7-3716-48e8-9679-c1bb140259eb\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.947245 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-ovndb-tls-certs\") pod \"3c9208c7-3716-48e8-9679-c1bb140259eb\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.947390 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-httpd-config\") pod \"3c9208c7-3716-48e8-9679-c1bb140259eb\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.947494 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-config\") pod \"3c9208c7-3716-48e8-9679-c1bb140259eb\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.947641 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-combined-ca-bundle\") pod \"3c9208c7-3716-48e8-9679-c1bb140259eb\" (UID: \"3c9208c7-3716-48e8-9679-c1bb140259eb\") " Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.954170 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "3c9208c7-3716-48e8-9679-c1bb140259eb" (UID: "3c9208c7-3716-48e8-9679-c1bb140259eb"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:26 crc kubenswrapper[5010]: I1126 15:50:26.954385 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c9208c7-3716-48e8-9679-c1bb140259eb-kube-api-access-l28j5" (OuterVolumeSpecName: "kube-api-access-l28j5") pod "3c9208c7-3716-48e8-9679-c1bb140259eb" (UID: "3c9208c7-3716-48e8-9679-c1bb140259eb"). InnerVolumeSpecName "kube-api-access-l28j5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.004667 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-config" (OuterVolumeSpecName: "config") pod "3c9208c7-3716-48e8-9679-c1bb140259eb" (UID: "3c9208c7-3716-48e8-9679-c1bb140259eb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.012187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c9208c7-3716-48e8-9679-c1bb140259eb" (UID: "3c9208c7-3716-48e8-9679-c1bb140259eb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.028282 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "3c9208c7-3716-48e8-9679-c1bb140259eb" (UID: "3c9208c7-3716-48e8-9679-c1bb140259eb"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.050578 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l28j5\" (UniqueName: \"kubernetes.io/projected/3c9208c7-3716-48e8-9679-c1bb140259eb-kube-api-access-l28j5\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.050875 5010 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.050887 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.050898 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.050906 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c9208c7-3716-48e8-9679-c1bb140259eb-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.738157 5010 generic.go:334] "Generic (PLEG): container finished" podID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerID="5b92333b7dee230fd4797705b1bf7027b14c9a0b76579f966163f5850f0e52a8" exitCode=0 Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.738321 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-747455655b-ldrpd" Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.739874 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerDied","Data":"5b92333b7dee230fd4797705b1bf7027b14c9a0b76579f966163f5850f0e52a8"} Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.775749 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-747455655b-ldrpd"] Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.790812 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-747455655b-ldrpd"] Nov 26 15:50:27 crc kubenswrapper[5010]: I1126 15:50:27.906602 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" path="/var/lib/kubelet/pods/3c9208c7-3716-48e8-9679-c1bb140259eb/volumes" Nov 26 15:50:31 crc kubenswrapper[5010]: I1126 15:50:31.956567 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 15:50:31 crc kubenswrapper[5010]: I1126 15:50:31.956986 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 15:50:31 crc kubenswrapper[5010]: I1126 15:50:31.991166 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.000811 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.052462 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.163723 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-run-httpd\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.164127 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-config-data\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.164305 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqkmx\" (UniqueName: \"kubernetes.io/projected/c5bee0a3-2c20-4dae-abb1-801e9d53485b-kube-api-access-gqkmx\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.164212 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.164504 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-combined-ca-bundle\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.164880 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-scripts\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.164945 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-log-httpd\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.165012 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-sg-core-conf-yaml\") pod \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\" (UID: \"c5bee0a3-2c20-4dae-abb1-801e9d53485b\") " Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.165480 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.165805 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.166491 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c5bee0a3-2c20-4dae-abb1-801e9d53485b-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.169077 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-scripts" (OuterVolumeSpecName: "scripts") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.172535 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5bee0a3-2c20-4dae-abb1-801e9d53485b-kube-api-access-gqkmx" (OuterVolumeSpecName: "kube-api-access-gqkmx") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "kube-api-access-gqkmx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.190541 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.237323 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.268147 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqkmx\" (UniqueName: \"kubernetes.io/projected/c5bee0a3-2c20-4dae-abb1-801e9d53485b-kube-api-access-gqkmx\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.268207 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.268225 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.268244 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.275176 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-config-data" (OuterVolumeSpecName: "config-data") pod "c5bee0a3-2c20-4dae-abb1-801e9d53485b" (UID: "c5bee0a3-2c20-4dae-abb1-801e9d53485b"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.371337 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c5bee0a3-2c20-4dae-abb1-801e9d53485b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.790619 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-56krg" event={"ID":"731566ac-0993-4f7d-a4ad-9fadd9beee04","Type":"ContainerStarted","Data":"653bfa6b3829db91148e50d25975da256293544493c1a848948cdc245d1ddb31"} Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.793271 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c5bee0a3-2c20-4dae-abb1-801e9d53485b","Type":"ContainerDied","Data":"8d9201b141bfe0059883d28721b8f01e57028edf690ef63fdaaef9fb7aec45cc"} Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.793314 5010 scope.go:117] "RemoveContainer" containerID="62ec60b700d8475ab38441d18c9c27bab1124087d3c0005ce5a8fcd867f35b22" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.793461 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.793485 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.793864 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.819762 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-56krg" podStartSLOduration=2.120986351 podStartE2EDuration="10.819742487s" podCreationTimestamp="2025-11-26 15:50:22 +0000 UTC" firstStartedPulling="2025-11-26 15:50:23.16029127 +0000 UTC m=+1443.951008418" lastFinishedPulling="2025-11-26 15:50:31.859047406 +0000 UTC m=+1452.649764554" observedRunningTime="2025-11-26 15:50:32.811834809 +0000 UTC m=+1453.602551957" watchObservedRunningTime="2025-11-26 15:50:32.819742487 +0000 UTC m=+1453.610459645" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.886968 5010 scope.go:117] "RemoveContainer" containerID="a8e4d225fcd4aaa77b55dfdf3eb2aa67e12d6774178dd7bea3bf6aff0a1f0002" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.891875 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.908466 5010 scope.go:117] "RemoveContainer" containerID="5bf6f781171d797dbea180acce8224422b88df9e8beb994b88dc12e1fb509af1" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.913693 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.926773 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:32 crc kubenswrapper[5010]: E1126 15:50:32.927314 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="sg-core" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927337 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="sg-core" Nov 26 15:50:32 crc kubenswrapper[5010]: E1126 15:50:32.927358 5010 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-httpd" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927366 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-httpd" Nov 26 15:50:32 crc kubenswrapper[5010]: E1126 15:50:32.927396 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-api" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927404 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-api" Nov 26 15:50:32 crc kubenswrapper[5010]: E1126 15:50:32.927429 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="proxy-httpd" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927436 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="proxy-httpd" Nov 26 15:50:32 crc kubenswrapper[5010]: E1126 15:50:32.927453 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-central-agent" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927461 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-central-agent" Nov 26 15:50:32 crc kubenswrapper[5010]: E1126 15:50:32.927482 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-notification-agent" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927491 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-notification-agent" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927697 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-httpd" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927729 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="sg-core" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927744 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c9208c7-3716-48e8-9679-c1bb140259eb" containerName="neutron-api" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927761 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-notification-agent" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927783 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="ceilometer-central-agent" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.927796 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" containerName="proxy-httpd" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.929822 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.932871 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.933198 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.934510 5010 scope.go:117] "RemoveContainer" containerID="5b92333b7dee230fd4797705b1bf7027b14c9a0b76579f966163f5850f0e52a8" Nov 26 15:50:32 crc kubenswrapper[5010]: I1126 15:50:32.961095 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084061 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084124 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-log-httpd\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084159 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2xvr\" (UniqueName: \"kubernetes.io/projected/5530a835-f673-4445-9b0c-dee162f45a76-kube-api-access-j2xvr\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084250 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-scripts\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084291 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084425 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-config-data\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.084480 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-run-httpd\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.186816 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.187131 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-log-httpd\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.187243 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2xvr\" (UniqueName: \"kubernetes.io/projected/5530a835-f673-4445-9b0c-dee162f45a76-kube-api-access-j2xvr\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.187407 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-scripts\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.187505 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.187638 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-config-data\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.188242 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-run-httpd\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.188846 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-run-httpd\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.189011 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-log-httpd\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.192332 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-config-data\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.192929 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.193556 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.209126 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-scripts\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.212534 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2xvr\" (UniqueName: \"kubernetes.io/projected/5530a835-f673-4445-9b0c-dee162f45a76-kube-api-access-j2xvr\") pod \"ceilometer-0\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.278905 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.742940 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.809472 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerStarted","Data":"64656c9ab2896d9680d5fe1ea864fe9640f048cefb2f30f2511bbf68c70dd9fb"} Nov 26 15:50:33 crc kubenswrapper[5010]: I1126 15:50:33.911646 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c5bee0a3-2c20-4dae-abb1-801e9d53485b" path="/var/lib/kubelet/pods/c5bee0a3-2c20-4dae-abb1-801e9d53485b/volumes" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.065786 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.065832 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.103384 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.114418 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.822609 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.822654 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.989480 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 15:50:34 crc kubenswrapper[5010]: I1126 15:50:34.989614 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:50:34 crc kubenswrapper[5010]: 
I1126 15:50:34.990880 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 15:50:36 crc kubenswrapper[5010]: I1126 15:50:36.858963 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerStarted","Data":"b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b"} Nov 26 15:50:37 crc kubenswrapper[5010]: I1126 15:50:37.012602 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:37 crc kubenswrapper[5010]: I1126 15:50:37.012725 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 15:50:37 crc kubenswrapper[5010]: I1126 15:50:37.273983 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 15:50:37 crc kubenswrapper[5010]: I1126 15:50:37.869735 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerStarted","Data":"04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607"} Nov 26 15:50:38 crc kubenswrapper[5010]: I1126 15:50:38.880426 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerStarted","Data":"35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac"} Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.242514 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.901823 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-central-agent" containerID="cri-o://b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b" gracePeriod=30 Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.902138 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="proxy-httpd" containerID="cri-o://333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea" gracePeriod=30 Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.902213 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="sg-core" containerID="cri-o://35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac" gracePeriod=30 Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.902253 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-notification-agent" containerID="cri-o://04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607" gracePeriod=30 Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.905377 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerStarted","Data":"333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea"} Nov 26 15:50:39 crc kubenswrapper[5010]: I1126 15:50:39.979499 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/ceilometer-0" podStartSLOduration=2.223793299 podStartE2EDuration="7.979472406s" podCreationTimestamp="2025-11-26 15:50:32 +0000 UTC" firstStartedPulling="2025-11-26 15:50:33.749656949 +0000 UTC m=+1454.540374097" lastFinishedPulling="2025-11-26 15:50:39.505336056 +0000 UTC m=+1460.296053204" observedRunningTime="2025-11-26 15:50:39.969213159 +0000 UTC m=+1460.759930307" watchObservedRunningTime="2025-11-26 15:50:39.979472406 +0000 UTC m=+1460.770189564" Nov 26 15:50:40 crc kubenswrapper[5010]: I1126 15:50:40.913747 5010 generic.go:334] "Generic (PLEG): container finished" podID="5530a835-f673-4445-9b0c-dee162f45a76" containerID="333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea" exitCode=0 Nov 26 15:50:40 crc kubenswrapper[5010]: I1126 15:50:40.914144 5010 generic.go:334] "Generic (PLEG): container finished" podID="5530a835-f673-4445-9b0c-dee162f45a76" containerID="35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac" exitCode=2 Nov 26 15:50:40 crc kubenswrapper[5010]: I1126 15:50:40.914158 5010 generic.go:334] "Generic (PLEG): container finished" podID="5530a835-f673-4445-9b0c-dee162f45a76" containerID="04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607" exitCode=0 Nov 26 15:50:40 crc kubenswrapper[5010]: I1126 15:50:40.913817 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerDied","Data":"333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea"} Nov 26 15:50:40 crc kubenswrapper[5010]: I1126 15:50:40.914221 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerDied","Data":"35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac"} Nov 26 15:50:40 crc kubenswrapper[5010]: I1126 15:50:40.914248 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerDied","Data":"04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607"} Nov 26 15:50:41 crc kubenswrapper[5010]: I1126 15:50:41.423166 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:50:41 crc kubenswrapper[5010]: I1126 15:50:41.423496 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.645758 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.704385 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-log-httpd\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.704522 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-combined-ca-bundle\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.704545 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-config-data\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.704585 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2xvr\" (UniqueName: \"kubernetes.io/projected/5530a835-f673-4445-9b0c-dee162f45a76-kube-api-access-j2xvr\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.705004 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.705385 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-scripts\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.705426 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-run-httpd\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.705452 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-sg-core-conf-yaml\") pod \"5530a835-f673-4445-9b0c-dee162f45a76\" (UID: \"5530a835-f673-4445-9b0c-dee162f45a76\") " Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.705777 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.706298 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.706324 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5530a835-f673-4445-9b0c-dee162f45a76-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.711560 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-scripts" (OuterVolumeSpecName: "scripts") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.712339 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5530a835-f673-4445-9b0c-dee162f45a76-kube-api-access-j2xvr" (OuterVolumeSpecName: "kube-api-access-j2xvr") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "kube-api-access-j2xvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.734984 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.792028 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.807949 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.808001 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2xvr\" (UniqueName: \"kubernetes.io/projected/5530a835-f673-4445-9b0c-dee162f45a76-kube-api-access-j2xvr\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.808018 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.808029 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.814010 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-config-data" (OuterVolumeSpecName: "config-data") pod "5530a835-f673-4445-9b0c-dee162f45a76" (UID: "5530a835-f673-4445-9b0c-dee162f45a76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.910350 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5530a835-f673-4445-9b0c-dee162f45a76-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.958259 5010 generic.go:334] "Generic (PLEG): container finished" podID="5530a835-f673-4445-9b0c-dee162f45a76" containerID="b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b" exitCode=0 Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.958315 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerDied","Data":"b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b"} Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.958362 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.958388 5010 scope.go:117] "RemoveContainer" containerID="333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea" Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.958369 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5530a835-f673-4445-9b0c-dee162f45a76","Type":"ContainerDied","Data":"64656c9ab2896d9680d5fe1ea864fe9640f048cefb2f30f2511bbf68c70dd9fb"} Nov 26 15:50:43 crc kubenswrapper[5010]: I1126 15:50:43.992239 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.013981 5010 scope.go:117] "RemoveContainer" containerID="35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.018256 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049081 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.049515 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="proxy-httpd" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049527 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="proxy-httpd" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.049552 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="sg-core" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049558 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="sg-core" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.049579 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-notification-agent" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049586 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-notification-agent" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.049597 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-central-agent" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049603 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-central-agent" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049798 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="sg-core" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049813 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-notification-agent" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049830 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5530a835-f673-4445-9b0c-dee162f45a76" containerName="ceilometer-central-agent" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.049842 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5530a835-f673-4445-9b0c-dee162f45a76" 
containerName="proxy-httpd" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.054410 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.058218 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.058562 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.063923 5010 scope.go:117] "RemoveContainer" containerID="04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.079851 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.101754 5010 scope.go:117] "RemoveContainer" containerID="b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.113791 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.114341 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-config-data\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.114415 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-scripts\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.114492 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.114519 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kktb5\" (UniqueName: \"kubernetes.io/projected/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-kube-api-access-kktb5\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.114604 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-run-httpd\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.114670 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-log-httpd\") pod 
\"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.126607 5010 scope.go:117] "RemoveContainer" containerID="333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.129415 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea\": container with ID starting with 333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea not found: ID does not exist" containerID="333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.129468 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea"} err="failed to get container status \"333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea\": rpc error: code = NotFound desc = could not find container \"333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea\": container with ID starting with 333c40101c44a4e7c33f461d676f962de2ef2d89b76202962a51f43d9c341aea not found: ID does not exist" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.129500 5010 scope.go:117] "RemoveContainer" containerID="35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.129855 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac\": container with ID starting with 35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac not found: ID does not exist" containerID="35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.129879 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac"} err="failed to get container status \"35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac\": rpc error: code = NotFound desc = could not find container \"35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac\": container with ID starting with 35206a955ed48425372b72ce78421c7e770d7dda4bc4f94613c5a75f9cb3b2ac not found: ID does not exist" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.129895 5010 scope.go:117] "RemoveContainer" containerID="04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.130112 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607\": container with ID starting with 04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607 not found: ID does not exist" containerID="04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.130137 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607"} err="failed to get container status \"04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607\": rpc 
error: code = NotFound desc = could not find container \"04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607\": container with ID starting with 04df112e91f6fae0c161776a8b67f5bf4668d00b101eed01909effcc1496d607 not found: ID does not exist" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.130154 5010 scope.go:117] "RemoveContainer" containerID="b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b" Nov 26 15:50:44 crc kubenswrapper[5010]: E1126 15:50:44.130348 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b\": container with ID starting with b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b not found: ID does not exist" containerID="b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.130370 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b"} err="failed to get container status \"b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b\": rpc error: code = NotFound desc = could not find container \"b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b\": container with ID starting with b4af86264e5e3d9e7b98486c257ee373317cc6cad5fa90926c09828dab31f29b not found: ID does not exist" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.218827 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-scripts\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219149 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219282 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kktb5\" (UniqueName: \"kubernetes.io/projected/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-kube-api-access-kktb5\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219425 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-run-httpd\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219657 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-log-httpd\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219814 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219912 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-run-httpd\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.219921 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-config-data\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.220274 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-log-httpd\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.223433 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.224103 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-config-data\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.224726 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.225763 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-scripts\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.248125 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kktb5\" (UniqueName: \"kubernetes.io/projected/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-kube-api-access-kktb5\") pod \"ceilometer-0\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.392390 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.909384 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:50:44 crc kubenswrapper[5010]: W1126 15:50:44.919381 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1db39e6e_dc82_4ded_8c69_37ba0746ab3e.slice/crio-0bc9cb14f1a778f2264cd2f64d82fa14267ec51371703b737b87a440853ea58f WatchSource:0}: Error finding container 0bc9cb14f1a778f2264cd2f64d82fa14267ec51371703b737b87a440853ea58f: Status 404 returned error can't find the container with id 0bc9cb14f1a778f2264cd2f64d82fa14267ec51371703b737b87a440853ea58f Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.971836 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerStarted","Data":"0bc9cb14f1a778f2264cd2f64d82fa14267ec51371703b737b87a440853ea58f"} Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.977516 5010 generic.go:334] "Generic (PLEG): container finished" podID="731566ac-0993-4f7d-a4ad-9fadd9beee04" containerID="653bfa6b3829db91148e50d25975da256293544493c1a848948cdc245d1ddb31" exitCode=0 Nov 26 15:50:44 crc kubenswrapper[5010]: I1126 15:50:44.977583 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-56krg" event={"ID":"731566ac-0993-4f7d-a4ad-9fadd9beee04","Type":"ContainerDied","Data":"653bfa6b3829db91148e50d25975da256293544493c1a848948cdc245d1ddb31"} Nov 26 15:50:45 crc kubenswrapper[5010]: I1126 15:50:45.911750 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5530a835-f673-4445-9b0c-dee162f45a76" path="/var/lib/kubelet/pods/5530a835-f673-4445-9b0c-dee162f45a76/volumes" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.413227 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.463436 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-config-data\") pod \"731566ac-0993-4f7d-a4ad-9fadd9beee04\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.463546 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-scripts\") pod \"731566ac-0993-4f7d-a4ad-9fadd9beee04\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.463570 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-combined-ca-bundle\") pod \"731566ac-0993-4f7d-a4ad-9fadd9beee04\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.463647 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt79b\" (UniqueName: \"kubernetes.io/projected/731566ac-0993-4f7d-a4ad-9fadd9beee04-kube-api-access-kt79b\") pod \"731566ac-0993-4f7d-a4ad-9fadd9beee04\" (UID: \"731566ac-0993-4f7d-a4ad-9fadd9beee04\") " Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.472789 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-scripts" (OuterVolumeSpecName: "scripts") pod "731566ac-0993-4f7d-a4ad-9fadd9beee04" (UID: "731566ac-0993-4f7d-a4ad-9fadd9beee04"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.474893 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/731566ac-0993-4f7d-a4ad-9fadd9beee04-kube-api-access-kt79b" (OuterVolumeSpecName: "kube-api-access-kt79b") pod "731566ac-0993-4f7d-a4ad-9fadd9beee04" (UID: "731566ac-0993-4f7d-a4ad-9fadd9beee04"). InnerVolumeSpecName "kube-api-access-kt79b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.498510 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "731566ac-0993-4f7d-a4ad-9fadd9beee04" (UID: "731566ac-0993-4f7d-a4ad-9fadd9beee04"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.498790 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-config-data" (OuterVolumeSpecName: "config-data") pod "731566ac-0993-4f7d-a4ad-9fadd9beee04" (UID: "731566ac-0993-4f7d-a4ad-9fadd9beee04"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.565796 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.565835 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.565865 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt79b\" (UniqueName: \"kubernetes.io/projected/731566ac-0993-4f7d-a4ad-9fadd9beee04-kube-api-access-kt79b\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:46 crc kubenswrapper[5010]: I1126 15:50:46.565877 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/731566ac-0993-4f7d-a4ad-9fadd9beee04-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.005925 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-56krg" event={"ID":"731566ac-0993-4f7d-a4ad-9fadd9beee04","Type":"ContainerDied","Data":"5c226fa9f633dd1e9282e5c5d6aefa260789d05712433a64a7b83858fb47cc85"} Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.005988 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c226fa9f633dd1e9282e5c5d6aefa260789d05712433a64a7b83858fb47cc85" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.005997 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-56krg" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.171867 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 15:50:47 crc kubenswrapper[5010]: E1126 15:50:47.172670 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731566ac-0993-4f7d-a4ad-9fadd9beee04" containerName="nova-cell0-conductor-db-sync" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.172693 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="731566ac-0993-4f7d-a4ad-9fadd9beee04" containerName="nova-cell0-conductor-db-sync" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.172942 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="731566ac-0993-4f7d-a4ad-9fadd9beee04" containerName="nova-cell0-conductor-db-sync" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.173796 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.177515 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-mv8bz" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.177765 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.177835 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.278995 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.279278 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.279442 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg9v7\" (UniqueName: \"kubernetes.io/projected/3c00abcf-4e27-48ae-be52-a92cbd24957c-kube-api-access-xg9v7\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.380886 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.380945 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.381048 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg9v7\" (UniqueName: \"kubernetes.io/projected/3c00abcf-4e27-48ae-be52-a92cbd24957c-kube-api-access-xg9v7\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.386821 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.389508 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.411553 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg9v7\" (UniqueName: \"kubernetes.io/projected/3c00abcf-4e27-48ae-be52-a92cbd24957c-kube-api-access-xg9v7\") pod \"nova-cell0-conductor-0\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.499316 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:47 crc kubenswrapper[5010]: I1126 15:50:47.971198 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 15:50:47 crc kubenswrapper[5010]: W1126 15:50:47.979140 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3c00abcf_4e27_48ae_be52_a92cbd24957c.slice/crio-56cbcb98173f0b8ca158f1fa0d577900ae08393612173b33da86f9854f0d3d87 WatchSource:0}: Error finding container 56cbcb98173f0b8ca158f1fa0d577900ae08393612173b33da86f9854f0d3d87: Status 404 returned error can't find the container with id 56cbcb98173f0b8ca158f1fa0d577900ae08393612173b33da86f9854f0d3d87 Nov 26 15:50:48 crc kubenswrapper[5010]: I1126 15:50:48.023168 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerStarted","Data":"be84644312daa0eeaad435cd97f4bf9e69ce0fec67353d644d52be09ca1dda14"} Nov 26 15:50:48 crc kubenswrapper[5010]: I1126 15:50:48.024535 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3c00abcf-4e27-48ae-be52-a92cbd24957c","Type":"ContainerStarted","Data":"56cbcb98173f0b8ca158f1fa0d577900ae08393612173b33da86f9854f0d3d87"} Nov 26 15:50:49 crc kubenswrapper[5010]: I1126 15:50:49.044461 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerStarted","Data":"2b805e0f105e16a084cd77b2e2d7a3e9d50e1467ddbfd960127ae1af4d8e50b6"} Nov 26 15:50:49 crc kubenswrapper[5010]: I1126 15:50:49.044878 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerStarted","Data":"fec477bfab8c2e30ff3f25a68c1456490f0eea9398ae0047823a6fb6369ffbd8"} Nov 26 15:50:49 crc kubenswrapper[5010]: I1126 15:50:49.050497 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3c00abcf-4e27-48ae-be52-a92cbd24957c","Type":"ContainerStarted","Data":"b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe"} Nov 26 15:50:49 crc kubenswrapper[5010]: I1126 15:50:49.050792 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:49 crc kubenswrapper[5010]: I1126 15:50:49.070766 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.070744323 podStartE2EDuration="2.070744323s" podCreationTimestamp="2025-11-26 15:50:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:50:49.066578499 +0000 UTC m=+1469.857295687" watchObservedRunningTime="2025-11-26 
15:50:49.070744323 +0000 UTC m=+1469.861461531" Nov 26 15:50:51 crc kubenswrapper[5010]: I1126 15:50:51.077461 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerStarted","Data":"d54a2b4407680a0f0c71ee70b5d943e7422cb44f2affa01809599bc613c10966"} Nov 26 15:50:51 crc kubenswrapper[5010]: I1126 15:50:51.078000 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 15:50:51 crc kubenswrapper[5010]: I1126 15:50:51.106829 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8869192099999998 podStartE2EDuration="7.106804514s" podCreationTimestamp="2025-11-26 15:50:44 +0000 UTC" firstStartedPulling="2025-11-26 15:50:44.92509828 +0000 UTC m=+1465.715815448" lastFinishedPulling="2025-11-26 15:50:50.144983564 +0000 UTC m=+1470.935700752" observedRunningTime="2025-11-26 15:50:51.097119202 +0000 UTC m=+1471.887836440" watchObservedRunningTime="2025-11-26 15:50:51.106804514 +0000 UTC m=+1471.897521662" Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.537453 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.862517 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8gn7x"] Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.868417 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.875760 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8gn7x"] Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.908630 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/15e0c240-1511-4902-9893-a3f9dd146c8a-kube-api-access-r8zs8\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.909175 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-catalog-content\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:57 crc kubenswrapper[5010]: I1126 15:50:57.909199 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-utilities\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.020319 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-catalog-content\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.020377 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-utilities\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.020829 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/15e0c240-1511-4902-9893-a3f9dd146c8a-kube-api-access-r8zs8\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.021064 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-catalog-content\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.022005 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-utilities\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.055687 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/15e0c240-1511-4902-9893-a3f9dd146c8a-kube-api-access-r8zs8\") pod \"redhat-marketplace-8gn7x\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.066458 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-hhsq2"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.067820 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.072183 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.072750 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.087467 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-hhsq2"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.205698 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.207192 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.210564 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.213147 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.224375 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239237 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2dn5\" (UniqueName: \"kubernetes.io/projected/91f360e4-480f-4398-9d5c-c19e3146a160-kube-api-access-c2dn5\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239320 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-config-data\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239380 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-config-data\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239405 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239426 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-scripts\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239444 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7xsh\" (UniqueName: \"kubernetes.io/projected/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-kube-api-access-c7xsh\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.239510 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.282391 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.285541 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.295888 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.344923 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2dn5\" (UniqueName: \"kubernetes.io/projected/91f360e4-480f-4398-9d5c-c19e3146a160-kube-api-access-c2dn5\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345022 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345060 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktl4x\" (UniqueName: \"kubernetes.io/projected/7416391c-360d-4bcd-9cfa-6977446520ed-kube-api-access-ktl4x\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345116 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-config-data\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345166 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-config-data\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345204 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345234 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-scripts\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345261 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7xsh\" (UniqueName: \"kubernetes.io/projected/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-kube-api-access-c7xsh\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345284 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.345327 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.362814 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.365129 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.365766 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-config-data\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.375669 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.376215 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-config-data\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.377242 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-scripts\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.401428 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7xsh\" (UniqueName: \"kubernetes.io/projected/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-kube-api-access-c7xsh\") pod \"nova-scheduler-0\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.429315 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2dn5\" (UniqueName: \"kubernetes.io/projected/91f360e4-480f-4398-9d5c-c19e3146a160-kube-api-access-c2dn5\") pod \"nova-cell0-cell-mapping-hhsq2\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.432819 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: 
I1126 15:50:58.434539 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.435976 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.446928 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8s8ll\" (UniqueName: \"kubernetes.io/projected/a624105a-6d36-4a58-b571-d16de2550312-kube-api-access-8s8ll\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.446993 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.447046 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a624105a-6d36-4a58-b571-d16de2550312-logs\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.447069 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.447090 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktl4x\" (UniqueName: \"kubernetes.io/projected/7416391c-360d-4bcd-9cfa-6977446520ed-kube-api-access-ktl4x\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.447144 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-config-data\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.447175 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.453806 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.456453 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.470568 5010 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.479363 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.482166 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.485933 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.498818 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktl4x\" (UniqueName: \"kubernetes.io/projected/7416391c-360d-4bcd-9cfa-6977446520ed-kube-api-access-ktl4x\") pod \"nova-cell1-novncproxy-0\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.538978 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.552371 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8s8ll\" (UniqueName: \"kubernetes.io/projected/a624105a-6d36-4a58-b571-d16de2550312-kube-api-access-8s8ll\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.552424 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.552492 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a624105a-6d36-4a58-b571-d16de2550312-logs\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.552551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-config-data\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.559910 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a624105a-6d36-4a58-b571-d16de2550312-logs\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.566316 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.568431 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-config-data\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.579606 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.583771 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.582774 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5dc9ff69c7-klxtz"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.585066 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8s8ll\" (UniqueName: \"kubernetes.io/projected/a624105a-6d36-4a58-b571-d16de2550312-kube-api-access-8s8ll\") pod \"nova-metadata-0\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.586836 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.603036 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.611221 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dc9ff69c7-klxtz"] Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.679845 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9xs6\" (UniqueName: \"kubernetes.io/projected/e508c27c-9334-411e-9861-5be1672da574-kube-api-access-g9xs6\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.679914 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-config-data\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.680093 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.680325 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e508c27c-9334-411e-9861-5be1672da574-logs\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.720181 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787150 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-config\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787287 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e508c27c-9334-411e-9861-5be1672da574-logs\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787333 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787397 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787422 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787444 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ftb9\" (UniqueName: \"kubernetes.io/projected/25f06307-6ec3-453f-b620-f76285347939-kube-api-access-7ftb9\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787498 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9xs6\" (UniqueName: \"kubernetes.io/projected/e508c27c-9334-411e-9861-5be1672da574-kube-api-access-g9xs6\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787520 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-config-data\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.787610 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc 
kubenswrapper[5010]: I1126 15:50:58.787664 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-svc\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.789658 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e508c27c-9334-411e-9861-5be1672da574-logs\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.795834 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-config-data\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.826446 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.852682 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9xs6\" (UniqueName: \"kubernetes.io/projected/e508c27c-9334-411e-9861-5be1672da574-kube-api-access-g9xs6\") pod \"nova-api-0\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.898980 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-svc\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.899078 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-config\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.899148 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.899197 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.899232 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-sb\") pod 
\"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.899259 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ftb9\" (UniqueName: \"kubernetes.io/projected/25f06307-6ec3-453f-b620-f76285347939-kube-api-access-7ftb9\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.900701 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-svc\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.900747 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-nb\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.901296 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-config\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.901852 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-swift-storage-0\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.902009 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-sb\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.918244 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.923566 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ftb9\" (UniqueName: \"kubernetes.io/projected/25f06307-6ec3-453f-b620-f76285347939-kube-api-access-7ftb9\") pod \"dnsmasq-dns-5dc9ff69c7-klxtz\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:58 crc kubenswrapper[5010]: I1126 15:50:58.938363 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.193007 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8gn7x"] Nov 26 15:50:59 crc kubenswrapper[5010]: W1126 15:50:59.211280 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15e0c240_1511_4902_9893_a3f9dd146c8a.slice/crio-c6f5b7101fb817b6fa0c009740cf75cba79fcf9b46703077e9eb4446f3c34a53 WatchSource:0}: Error finding container c6f5b7101fb817b6fa0c009740cf75cba79fcf9b46703077e9eb4446f3c34a53: Status 404 returned error can't find the container with id c6f5b7101fb817b6fa0c009740cf75cba79fcf9b46703077e9eb4446f3c34a53 Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.483105 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:50:59 crc kubenswrapper[5010]: W1126 15:50:59.501855 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3bb3c6b5_4b48_4354_bf61_2a9e4a250669.slice/crio-05e35e406f855c827847648686b63c3dd5ea7ac140da35ed81f36b185989f588 WatchSource:0}: Error finding container 05e35e406f855c827847648686b63c3dd5ea7ac140da35ed81f36b185989f588: Status 404 returned error can't find the container with id 05e35e406f855c827847648686b63c3dd5ea7ac140da35ed81f36b185989f588 Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.560756 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-brgbd"] Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.561996 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.565050 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.565132 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.580340 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-brgbd"] Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.622439 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-config-data\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.622613 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdn24\" (UniqueName: \"kubernetes.io/projected/e03a12d5-1d79-4780-b7cb-e752eaec9783-kube-api-access-fdn24\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.622662 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: 
\"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.622719 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-scripts\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.725082 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdn24\" (UniqueName: \"kubernetes.io/projected/e03a12d5-1d79-4780-b7cb-e752eaec9783-kube-api-access-fdn24\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.725163 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.725194 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-scripts\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.725248 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-config-data\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.734250 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-config-data\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.734498 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.734915 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-scripts\") pod \"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.743642 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdn24\" (UniqueName: \"kubernetes.io/projected/e03a12d5-1d79-4780-b7cb-e752eaec9783-kube-api-access-fdn24\") pod 
\"nova-cell1-conductor-db-sync-brgbd\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.863958 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.899408 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:50:59 crc kubenswrapper[5010]: W1126 15:50:59.947064 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25f06307_6ec3_453f_b620_f76285347939.slice/crio-90fea5c929646bff8c1c3f1273ecfda35170201231f60ad14df8c90622a318dc WatchSource:0}: Error finding container 90fea5c929646bff8c1c3f1273ecfda35170201231f60ad14df8c90622a318dc: Status 404 returned error can't find the container with id 90fea5c929646bff8c1c3f1273ecfda35170201231f60ad14df8c90622a318dc Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.949593 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.949637 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-hhsq2"] Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.959809 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:50:59 crc kubenswrapper[5010]: I1126 15:50:59.975280 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5dc9ff69c7-klxtz"] Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.203938 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bb3c6b5-4b48-4354-bf61-2a9e4a250669","Type":"ContainerStarted","Data":"05e35e406f855c827847648686b63c3dd5ea7ac140da35ed81f36b185989f588"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.206004 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" event={"ID":"25f06307-6ec3-453f-b620-f76285347939","Type":"ContainerStarted","Data":"90fea5c929646bff8c1c3f1273ecfda35170201231f60ad14df8c90622a318dc"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.208269 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hhsq2" event={"ID":"91f360e4-480f-4398-9d5c-c19e3146a160","Type":"ContainerStarted","Data":"c393970aa38219e0923f22725f0b6970232ca8d43603a54c2a99a52e2f1214d6"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.210905 5010 generic.go:334] "Generic (PLEG): container finished" podID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerID="6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373" exitCode=0 Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.210961 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerDied","Data":"6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.210979 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerStarted","Data":"c6f5b7101fb817b6fa0c009740cf75cba79fcf9b46703077e9eb4446f3c34a53"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.212631 
5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a624105a-6d36-4a58-b571-d16de2550312","Type":"ContainerStarted","Data":"69bcfe0bbc0fe255e2384ceff3da77ec787507f5bcbc7726ff6b9b1b3c7c3672"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.214457 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e508c27c-9334-411e-9861-5be1672da574","Type":"ContainerStarted","Data":"bd045f2b0970c5f8f13a1264775c6c0922966638c19f62efd5dd8bf35a470426"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.216144 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7416391c-360d-4bcd-9cfa-6977446520ed","Type":"ContainerStarted","Data":"978aa98c8f44add19aa0ebe77bceff4a1aed24cba20df0f3983921cd2f7adf68"} Nov 26 15:51:00 crc kubenswrapper[5010]: I1126 15:51:00.474778 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-brgbd"] Nov 26 15:51:00 crc kubenswrapper[5010]: W1126 15:51:00.491559 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode03a12d5_1d79_4780_b7cb_e752eaec9783.slice/crio-61e87e84974e4d708e8e3b05cf1af678956133173813de25ac99991750eb4a14 WatchSource:0}: Error finding container 61e87e84974e4d708e8e3b05cf1af678956133173813de25ac99991750eb4a14: Status 404 returned error can't find the container with id 61e87e84974e4d708e8e3b05cf1af678956133173813de25ac99991750eb4a14 Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.233071 5010 generic.go:334] "Generic (PLEG): container finished" podID="25f06307-6ec3-453f-b620-f76285347939" containerID="9b30183b18b44114598b0c581ef6557abe5c293dc0a8c6e99f2054f8cd4eaa66" exitCode=0 Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.233134 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" event={"ID":"25f06307-6ec3-453f-b620-f76285347939","Type":"ContainerDied","Data":"9b30183b18b44114598b0c581ef6557abe5c293dc0a8c6e99f2054f8cd4eaa66"} Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.236667 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hhsq2" event={"ID":"91f360e4-480f-4398-9d5c-c19e3146a160","Type":"ContainerStarted","Data":"11b2381ddcc7f6d25f34d85e987cdc506d5085296b8785658e79bf5262db374c"} Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.239833 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-brgbd" event={"ID":"e03a12d5-1d79-4780-b7cb-e752eaec9783","Type":"ContainerStarted","Data":"6806ec765a15b121cc35d21ba23121403bf62a62cbc3ac67abb3c89a0321b696"} Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.239858 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-brgbd" event={"ID":"e03a12d5-1d79-4780-b7cb-e752eaec9783","Type":"ContainerStarted","Data":"61e87e84974e4d708e8e3b05cf1af678956133173813de25ac99991750eb4a14"} Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.246226 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerStarted","Data":"1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107"} Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.274476 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-cell1-conductor-db-sync-brgbd" podStartSLOduration=2.274454866 podStartE2EDuration="2.274454866s" podCreationTimestamp="2025-11-26 15:50:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:01.272093147 +0000 UTC m=+1482.062810325" watchObservedRunningTime="2025-11-26 15:51:01.274454866 +0000 UTC m=+1482.065172014" Nov 26 15:51:01 crc kubenswrapper[5010]: I1126 15:51:01.317001 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-hhsq2" podStartSLOduration=3.31698126 podStartE2EDuration="3.31698126s" podCreationTimestamp="2025-11-26 15:50:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:01.307679867 +0000 UTC m=+1482.098397015" watchObservedRunningTime="2025-11-26 15:51:01.31698126 +0000 UTC m=+1482.107698408" Nov 26 15:51:02 crc kubenswrapper[5010]: I1126 15:51:02.262971 5010 generic.go:334] "Generic (PLEG): container finished" podID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerID="1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107" exitCode=0 Nov 26 15:51:02 crc kubenswrapper[5010]: I1126 15:51:02.263047 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerDied","Data":"1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107"} Nov 26 15:51:02 crc kubenswrapper[5010]: I1126 15:51:02.341827 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:02 crc kubenswrapper[5010]: I1126 15:51:02.371113 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.308611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" event={"ID":"25f06307-6ec3-453f-b620-f76285347939","Type":"ContainerStarted","Data":"5827b19dbc732d7bd831f5dd9073ae85ba02426144fcab2670e0cb07b87a87fb"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.311054 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.314975 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-log" containerID="cri-o://2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e" gracePeriod=30 Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.315008 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-metadata" containerID="cri-o://e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3" gracePeriod=30 Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.314930 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a624105a-6d36-4a58-b571-d16de2550312","Type":"ContainerStarted","Data":"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.315924 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"a624105a-6d36-4a58-b571-d16de2550312","Type":"ContainerStarted","Data":"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.322286 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerStarted","Data":"5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.328929 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e508c27c-9334-411e-9861-5be1672da574","Type":"ContainerStarted","Data":"969350a6fc0589ee4b7c24fd73c9e62a2fc1df10647d35c52db8cb05f1246d4f"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.328973 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e508c27c-9334-411e-9861-5be1672da574","Type":"ContainerStarted","Data":"daced953b28d667c9ccc7c8bf7f87db73db22d0e8c3015b55f0d059868a94d77"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.334575 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7416391c-360d-4bcd-9cfa-6977446520ed","Type":"ContainerStarted","Data":"22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.334640 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="7416391c-360d-4bcd-9cfa-6977446520ed" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389" gracePeriod=30 Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.334799 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" podStartSLOduration=7.334783284 podStartE2EDuration="7.334783284s" podCreationTimestamp="2025-11-26 15:50:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:05.334157848 +0000 UTC m=+1486.124874996" watchObservedRunningTime="2025-11-26 15:51:05.334783284 +0000 UTC m=+1486.125500432" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.344148 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bb3c6b5-4b48-4354-bf61-2a9e4a250669","Type":"ContainerStarted","Data":"84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a"} Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.380520 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.138516006 podStartE2EDuration="7.380498138s" podCreationTimestamp="2025-11-26 15:50:58 +0000 UTC" firstStartedPulling="2025-11-26 15:50:59.890851116 +0000 UTC m=+1480.681568264" lastFinishedPulling="2025-11-26 15:51:04.132833228 +0000 UTC m=+1484.923550396" observedRunningTime="2025-11-26 15:51:05.355087162 +0000 UTC m=+1486.145804310" watchObservedRunningTime="2025-11-26 15:51:05.380498138 +0000 UTC m=+1486.171215286" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.411874 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8gn7x" podStartSLOduration=4.493503855 podStartE2EDuration="8.411853572s" podCreationTimestamp="2025-11-26 15:50:57 +0000 UTC" 
firstStartedPulling="2025-11-26 15:51:00.212860671 +0000 UTC m=+1481.003577829" lastFinishedPulling="2025-11-26 15:51:04.131210378 +0000 UTC m=+1484.921927546" observedRunningTime="2025-11-26 15:51:05.375220536 +0000 UTC m=+1486.165937684" watchObservedRunningTime="2025-11-26 15:51:05.411853572 +0000 UTC m=+1486.202570720" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.418360 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.216007513 podStartE2EDuration="7.418342704s" podCreationTimestamp="2025-11-26 15:50:58 +0000 UTC" firstStartedPulling="2025-11-26 15:50:59.930563869 +0000 UTC m=+1480.721281017" lastFinishedPulling="2025-11-26 15:51:04.13289902 +0000 UTC m=+1484.923616208" observedRunningTime="2025-11-26 15:51:05.404349894 +0000 UTC m=+1486.195067042" watchObservedRunningTime="2025-11-26 15:51:05.418342704 +0000 UTC m=+1486.209059852" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.448141 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.823064884 podStartE2EDuration="7.448118389s" podCreationTimestamp="2025-11-26 15:50:58 +0000 UTC" firstStartedPulling="2025-11-26 15:50:59.5088608 +0000 UTC m=+1480.299577948" lastFinishedPulling="2025-11-26 15:51:04.133914295 +0000 UTC m=+1484.924631453" observedRunningTime="2025-11-26 15:51:05.424638522 +0000 UTC m=+1486.215355670" watchObservedRunningTime="2025-11-26 15:51:05.448118389 +0000 UTC m=+1486.238835537" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.472279 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.219712056 podStartE2EDuration="7.472260143s" podCreationTimestamp="2025-11-26 15:50:58 +0000 UTC" firstStartedPulling="2025-11-26 15:50:59.87863828 +0000 UTC m=+1480.669355428" lastFinishedPulling="2025-11-26 15:51:04.131186367 +0000 UTC m=+1484.921903515" observedRunningTime="2025-11-26 15:51:05.449977026 +0000 UTC m=+1486.240694174" watchObservedRunningTime="2025-11-26 15:51:05.472260143 +0000 UTC m=+1486.262977291" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.873685 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.879109 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-combined-ca-bundle\") pod \"a624105a-6d36-4a58-b571-d16de2550312\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.879322 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8s8ll\" (UniqueName: \"kubernetes.io/projected/a624105a-6d36-4a58-b571-d16de2550312-kube-api-access-8s8ll\") pod \"a624105a-6d36-4a58-b571-d16de2550312\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.879383 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a624105a-6d36-4a58-b571-d16de2550312-logs\") pod \"a624105a-6d36-4a58-b571-d16de2550312\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.879424 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-config-data\") pod \"a624105a-6d36-4a58-b571-d16de2550312\" (UID: \"a624105a-6d36-4a58-b571-d16de2550312\") " Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.879797 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a624105a-6d36-4a58-b571-d16de2550312-logs" (OuterVolumeSpecName: "logs") pod "a624105a-6d36-4a58-b571-d16de2550312" (UID: "a624105a-6d36-4a58-b571-d16de2550312"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.880344 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a624105a-6d36-4a58-b571-d16de2550312-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.886465 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a624105a-6d36-4a58-b571-d16de2550312-kube-api-access-8s8ll" (OuterVolumeSpecName: "kube-api-access-8s8ll") pod "a624105a-6d36-4a58-b571-d16de2550312" (UID: "a624105a-6d36-4a58-b571-d16de2550312"). InnerVolumeSpecName "kube-api-access-8s8ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.929164 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-config-data" (OuterVolumeSpecName: "config-data") pod "a624105a-6d36-4a58-b571-d16de2550312" (UID: "a624105a-6d36-4a58-b571-d16de2550312"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.947054 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a624105a-6d36-4a58-b571-d16de2550312" (UID: "a624105a-6d36-4a58-b571-d16de2550312"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.985021 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.985319 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a624105a-6d36-4a58-b571-d16de2550312-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:05 crc kubenswrapper[5010]: I1126 15:51:05.987300 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8s8ll\" (UniqueName: \"kubernetes.io/projected/a624105a-6d36-4a58-b571-d16de2550312-kube-api-access-8s8ll\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.367669 5010 generic.go:334] "Generic (PLEG): container finished" podID="a624105a-6d36-4a58-b571-d16de2550312" containerID="e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3" exitCode=0 Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.369535 5010 generic.go:334] "Generic (PLEG): container finished" podID="a624105a-6d36-4a58-b571-d16de2550312" containerID="2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e" exitCode=143 Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.367889 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a624105a-6d36-4a58-b571-d16de2550312","Type":"ContainerDied","Data":"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3"} Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.370062 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a624105a-6d36-4a58-b571-d16de2550312","Type":"ContainerDied","Data":"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e"} Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.370095 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a624105a-6d36-4a58-b571-d16de2550312","Type":"ContainerDied","Data":"69bcfe0bbc0fe255e2384ceff3da77ec787507f5bcbc7726ff6b9b1b3c7c3672"} Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.370125 5010 scope.go:117] "RemoveContainer" containerID="e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.367852 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.449876 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.465815 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.468647 5010 scope.go:117] "RemoveContainer" containerID="2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.478991 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:06 crc kubenswrapper[5010]: E1126 15:51:06.480759 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-metadata" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.480970 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-metadata" Nov 26 15:51:06 crc kubenswrapper[5010]: E1126 15:51:06.481200 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-log" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.481372 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-log" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.481960 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-log" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.482211 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a624105a-6d36-4a58-b571-d16de2550312" containerName="nova-metadata-metadata" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.484861 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.487878 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.488094 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.498082 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.500043 5010 scope.go:117] "RemoveContainer" containerID="e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3" Nov 26 15:51:06 crc kubenswrapper[5010]: E1126 15:51:06.502535 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3\": container with ID starting with e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3 not found: ID does not exist" containerID="e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.502576 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3"} err="failed to get container status \"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3\": rpc error: code = NotFound desc = could not find container \"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3\": container with ID starting with e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3 not found: ID does not exist" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.502603 5010 scope.go:117] "RemoveContainer" containerID="2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e" Nov 26 15:51:06 crc kubenswrapper[5010]: E1126 15:51:06.503006 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e\": container with ID starting with 2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e not found: ID does not exist" containerID="2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.503419 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e"} err="failed to get container status \"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e\": rpc error: code = NotFound desc = could not find container \"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e\": container with ID starting with 2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e not found: ID does not exist" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.507531 5010 scope.go:117] "RemoveContainer" containerID="e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.508774 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3"} err="failed to get container status \"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3\": rpc error: 
code = NotFound desc = could not find container \"e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3\": container with ID starting with e32720e0fde5e53d050a7e63c51f88d6e27a86d3c5773d4466629e03a96ae5e3 not found: ID does not exist" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.508838 5010 scope.go:117] "RemoveContainer" containerID="2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.510267 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e"} err="failed to get container status \"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e\": rpc error: code = NotFound desc = could not find container \"2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e\": container with ID starting with 2d1626e05dc958a7e4643bd389aee9053028d0637c6c7c8f81b41d3ed2b4396e not found: ID does not exist" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.600856 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.601005 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.601052 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b74f3f9-8fe4-4d82-a353-694e050fd27c-logs\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.601095 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-config-data\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.601118 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cz8mn\" (UniqueName: \"kubernetes.io/projected/2b74f3f9-8fe4-4d82-a353-694e050fd27c-kube-api-access-cz8mn\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.703322 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b74f3f9-8fe4-4d82-a353-694e050fd27c-logs\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.703434 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-config-data\") pod \"nova-metadata-0\" (UID: 
\"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.703475 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cz8mn\" (UniqueName: \"kubernetes.io/projected/2b74f3f9-8fe4-4d82-a353-694e050fd27c-kube-api-access-cz8mn\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.703672 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.703818 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.704463 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b74f3f9-8fe4-4d82-a353-694e050fd27c-logs\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.709056 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-config-data\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.715862 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.717524 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.722136 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cz8mn\" (UniqueName: \"kubernetes.io/projected/2b74f3f9-8fe4-4d82-a353-694e050fd27c-kube-api-access-cz8mn\") pod \"nova-metadata-0\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " pod="openstack/nova-metadata-0" Nov 26 15:51:06 crc kubenswrapper[5010]: I1126 15:51:06.811581 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:07 crc kubenswrapper[5010]: W1126 15:51:07.339390 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2b74f3f9_8fe4_4d82_a353_694e050fd27c.slice/crio-d907729982cbaf5db23cc36d90eda27aae482a4761cf699d09e6c1a601b62d23 WatchSource:0}: Error finding container d907729982cbaf5db23cc36d90eda27aae482a4761cf699d09e6c1a601b62d23: Status 404 returned error can't find the container with id d907729982cbaf5db23cc36d90eda27aae482a4761cf699d09e6c1a601b62d23 Nov 26 15:51:07 crc kubenswrapper[5010]: I1126 15:51:07.339700 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:07 crc kubenswrapper[5010]: I1126 15:51:07.393990 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b74f3f9-8fe4-4d82-a353-694e050fd27c","Type":"ContainerStarted","Data":"d907729982cbaf5db23cc36d90eda27aae482a4761cf699d09e6c1a601b62d23"} Nov 26 15:51:07 crc kubenswrapper[5010]: I1126 15:51:07.902616 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a624105a-6d36-4a58-b571-d16de2550312" path="/var/lib/kubelet/pods/a624105a-6d36-4a58-b571-d16de2550312/volumes" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.214746 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.214797 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.222270 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.228259 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.238062 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.304604 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.336000 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-catalog-content\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.337253 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-utilities\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.337547 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cglhd\" (UniqueName: \"kubernetes.io/projected/422e914d-f59a-4d7b-9614-77db0cf86ab6-kube-api-access-cglhd\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.405058 5010 generic.go:334] "Generic (PLEG): container finished" podID="91f360e4-480f-4398-9d5c-c19e3146a160" containerID="11b2381ddcc7f6d25f34d85e987cdc506d5085296b8785658e79bf5262db374c" exitCode=0 Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.405163 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hhsq2" event={"ID":"91f360e4-480f-4398-9d5c-c19e3146a160","Type":"ContainerDied","Data":"11b2381ddcc7f6d25f34d85e987cdc506d5085296b8785658e79bf5262db374c"} Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.406800 5010 generic.go:334] "Generic (PLEG): container finished" podID="e03a12d5-1d79-4780-b7cb-e752eaec9783" containerID="6806ec765a15b121cc35d21ba23121403bf62a62cbc3ac67abb3c89a0321b696" exitCode=0 Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.406869 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-brgbd" event={"ID":"e03a12d5-1d79-4780-b7cb-e752eaec9783","Type":"ContainerDied","Data":"6806ec765a15b121cc35d21ba23121403bf62a62cbc3ac67abb3c89a0321b696"} Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.408867 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b74f3f9-8fe4-4d82-a353-694e050fd27c","Type":"ContainerStarted","Data":"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2"} Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.408897 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b74f3f9-8fe4-4d82-a353-694e050fd27c","Type":"ContainerStarted","Data":"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90"} Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.441181 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-catalog-content\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.441235 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-utilities\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.442620 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cglhd\" (UniqueName: \"kubernetes.io/projected/422e914d-f59a-4d7b-9614-77db0cf86ab6-kube-api-access-cglhd\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.444295 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-catalog-content\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.445378 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-utilities\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.477650 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cglhd\" (UniqueName: \"kubernetes.io/projected/422e914d-f59a-4d7b-9614-77db0cf86ab6-kube-api-access-cglhd\") pod \"certified-operators-csc9n\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.572362 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.580379 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.584981 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.585123 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.630419 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.664547 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.664528867 podStartE2EDuration="2.664528867s" podCreationTimestamp="2025-11-26 15:51:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:08.465148239 +0000 UTC m=+1489.255865427" watchObservedRunningTime="2025-11-26 15:51:08.664528867 +0000 UTC m=+1489.455246015" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.919439 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:51:08 crc kubenswrapper[5010]: I1126 15:51:08.919477 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.151206 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.421186 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerStarted","Data":"d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51"} Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.421561 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerStarted","Data":"69ddf6e7f5ff6211c6d55bd29417cc1f4a46f11c205f937e800c98b4375769c1"} Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.475889 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.871023 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.880007 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983396 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-scripts\") pod \"e03a12d5-1d79-4780-b7cb-e752eaec9783\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983452 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-config-data\") pod \"91f360e4-480f-4398-9d5c-c19e3146a160\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983503 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-config-data\") pod \"e03a12d5-1d79-4780-b7cb-e752eaec9783\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983569 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-scripts\") pod \"91f360e4-480f-4398-9d5c-c19e3146a160\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983589 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2dn5\" (UniqueName: \"kubernetes.io/projected/91f360e4-480f-4398-9d5c-c19e3146a160-kube-api-access-c2dn5\") pod \"91f360e4-480f-4398-9d5c-c19e3146a160\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983606 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-combined-ca-bundle\") pod \"91f360e4-480f-4398-9d5c-c19e3146a160\" (UID: \"91f360e4-480f-4398-9d5c-c19e3146a160\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983668 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-combined-ca-bundle\") pod \"e03a12d5-1d79-4780-b7cb-e752eaec9783\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.983766 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdn24\" (UniqueName: \"kubernetes.io/projected/e03a12d5-1d79-4780-b7cb-e752eaec9783-kube-api-access-fdn24\") pod \"e03a12d5-1d79-4780-b7cb-e752eaec9783\" (UID: \"e03a12d5-1d79-4780-b7cb-e752eaec9783\") " Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.989057 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-scripts" (OuterVolumeSpecName: "scripts") pod "e03a12d5-1d79-4780-b7cb-e752eaec9783" (UID: "e03a12d5-1d79-4780-b7cb-e752eaec9783"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:09 crc kubenswrapper[5010]: I1126 15:51:09.989326 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91f360e4-480f-4398-9d5c-c19e3146a160-kube-api-access-c2dn5" (OuterVolumeSpecName: "kube-api-access-c2dn5") pod "91f360e4-480f-4398-9d5c-c19e3146a160" (UID: "91f360e4-480f-4398-9d5c-c19e3146a160"). InnerVolumeSpecName "kube-api-access-c2dn5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.003149 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.003165 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.191:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.003999 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e03a12d5-1d79-4780-b7cb-e752eaec9783-kube-api-access-fdn24" (OuterVolumeSpecName: "kube-api-access-fdn24") pod "e03a12d5-1d79-4780-b7cb-e752eaec9783" (UID: "e03a12d5-1d79-4780-b7cb-e752eaec9783"). InnerVolumeSpecName "kube-api-access-fdn24". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.015441 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-scripts" (OuterVolumeSpecName: "scripts") pod "91f360e4-480f-4398-9d5c-c19e3146a160" (UID: "91f360e4-480f-4398-9d5c-c19e3146a160"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.018904 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e03a12d5-1d79-4780-b7cb-e752eaec9783" (UID: "e03a12d5-1d79-4780-b7cb-e752eaec9783"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.020618 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "91f360e4-480f-4398-9d5c-c19e3146a160" (UID: "91f360e4-480f-4398-9d5c-c19e3146a160"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.020822 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-config-data" (OuterVolumeSpecName: "config-data") pod "91f360e4-480f-4398-9d5c-c19e3146a160" (UID: "91f360e4-480f-4398-9d5c-c19e3146a160"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.021300 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-config-data" (OuterVolumeSpecName: "config-data") pod "e03a12d5-1d79-4780-b7cb-e752eaec9783" (UID: "e03a12d5-1d79-4780-b7cb-e752eaec9783"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088407 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdn24\" (UniqueName: \"kubernetes.io/projected/e03a12d5-1d79-4780-b7cb-e752eaec9783-kube-api-access-fdn24\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088454 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088474 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088493 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088510 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088528 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2dn5\" (UniqueName: \"kubernetes.io/projected/91f360e4-480f-4398-9d5c-c19e3146a160-kube-api-access-c2dn5\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088547 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91f360e4-480f-4398-9d5c-c19e3146a160-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.088641 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e03a12d5-1d79-4780-b7cb-e752eaec9783-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.433927 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-hhsq2" event={"ID":"91f360e4-480f-4398-9d5c-c19e3146a160","Type":"ContainerDied","Data":"c393970aa38219e0923f22725f0b6970232ca8d43603a54c2a99a52e2f1214d6"} Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.433989 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c393970aa38219e0923f22725f0b6970232ca8d43603a54c2a99a52e2f1214d6" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.433952 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-hhsq2" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.445368 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-brgbd" event={"ID":"e03a12d5-1d79-4780-b7cb-e752eaec9783","Type":"ContainerDied","Data":"61e87e84974e4d708e8e3b05cf1af678956133173813de25ac99991750eb4a14"} Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.445409 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61e87e84974e4d708e8e3b05cf1af678956133173813de25ac99991750eb4a14" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.445401 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-brgbd" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.464145 5010 generic.go:334] "Generic (PLEG): container finished" podID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerID="d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51" exitCode=0 Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.464202 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerDied","Data":"d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51"} Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.534156 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 15:51:10 crc kubenswrapper[5010]: E1126 15:51:10.534970 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e03a12d5-1d79-4780-b7cb-e752eaec9783" containerName="nova-cell1-conductor-db-sync" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.535098 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e03a12d5-1d79-4780-b7cb-e752eaec9783" containerName="nova-cell1-conductor-db-sync" Nov 26 15:51:10 crc kubenswrapper[5010]: E1126 15:51:10.535261 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91f360e4-480f-4398-9d5c-c19e3146a160" containerName="nova-manage" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.535345 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="91f360e4-480f-4398-9d5c-c19e3146a160" containerName="nova-manage" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.535686 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e03a12d5-1d79-4780-b7cb-e752eaec9783" containerName="nova-cell1-conductor-db-sync" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.535819 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="91f360e4-480f-4398-9d5c-c19e3146a160" containerName="nova-manage" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.536746 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.541503 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.552550 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.599535 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5xcs\" (UniqueName: \"kubernetes.io/projected/1cfc9265-de84-4047-9e01-69444aa4d9f5-kube-api-access-d5xcs\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.599646 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.599721 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.652342 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.652629 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-log" containerID="cri-o://daced953b28d667c9ccc7c8bf7f87db73db22d0e8c3015b55f0d059868a94d77" gracePeriod=30 Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.652982 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-api" containerID="cri-o://969350a6fc0589ee4b7c24fd73c9e62a2fc1df10647d35c52db8cb05f1246d4f" gracePeriod=30 Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.663371 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.685160 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.685387 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-log" containerID="cri-o://5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90" gracePeriod=30 Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.685828 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-metadata" containerID="cri-o://a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2" gracePeriod=30 Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.703820 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.703869 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.703982 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5xcs\" (UniqueName: \"kubernetes.io/projected/1cfc9265-de84-4047-9e01-69444aa4d9f5-kube-api-access-d5xcs\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.707627 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.709870 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.723552 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5xcs\" (UniqueName: \"kubernetes.io/projected/1cfc9265-de84-4047-9e01-69444aa4d9f5-kube-api-access-d5xcs\") pod \"nova-cell1-conductor-0\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:10 crc kubenswrapper[5010]: I1126 15:51:10.856522 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.280828 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.318225 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b74f3f9-8fe4-4d82-a353-694e050fd27c-logs\") pod \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.318367 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-nova-metadata-tls-certs\") pod \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.318466 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-config-data\") pod \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.318498 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-combined-ca-bundle\") pod \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.318557 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cz8mn\" (UniqueName: \"kubernetes.io/projected/2b74f3f9-8fe4-4d82-a353-694e050fd27c-kube-api-access-cz8mn\") pod \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\" (UID: \"2b74f3f9-8fe4-4d82-a353-694e050fd27c\") " Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.319649 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b74f3f9-8fe4-4d82-a353-694e050fd27c-logs" (OuterVolumeSpecName: "logs") pod "2b74f3f9-8fe4-4d82-a353-694e050fd27c" (UID: "2b74f3f9-8fe4-4d82-a353-694e050fd27c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.329000 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b74f3f9-8fe4-4d82-a353-694e050fd27c-kube-api-access-cz8mn" (OuterVolumeSpecName: "kube-api-access-cz8mn") pod "2b74f3f9-8fe4-4d82-a353-694e050fd27c" (UID: "2b74f3f9-8fe4-4d82-a353-694e050fd27c"). InnerVolumeSpecName "kube-api-access-cz8mn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.348826 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-config-data" (OuterVolumeSpecName: "config-data") pod "2b74f3f9-8fe4-4d82-a353-694e050fd27c" (UID: "2b74f3f9-8fe4-4d82-a353-694e050fd27c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.352213 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2b74f3f9-8fe4-4d82-a353-694e050fd27c" (UID: "2b74f3f9-8fe4-4d82-a353-694e050fd27c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.376170 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2b74f3f9-8fe4-4d82-a353-694e050fd27c" (UID: "2b74f3f9-8fe4-4d82-a353-694e050fd27c"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.420927 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b74f3f9-8fe4-4d82-a353-694e050fd27c-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.420972 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.420988 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.421001 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b74f3f9-8fe4-4d82-a353-694e050fd27c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.421012 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cz8mn\" (UniqueName: \"kubernetes.io/projected/2b74f3f9-8fe4-4d82-a353-694e050fd27c-kube-api-access-cz8mn\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.423374 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.423431 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.429164 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.475964 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"1cfc9265-de84-4047-9e01-69444aa4d9f5","Type":"ContainerStarted","Data":"109c6fcb3b5f82d3cbd78e2c85049a846c7cede4f147afff2cc9dcff3a5ef005"} Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.477732 5010 generic.go:334] "Generic (PLEG): container finished" podID="e508c27c-9334-411e-9861-5be1672da574" containerID="daced953b28d667c9ccc7c8bf7f87db73db22d0e8c3015b55f0d059868a94d77" exitCode=143 Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.477773 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"e508c27c-9334-411e-9861-5be1672da574","Type":"ContainerDied","Data":"daced953b28d667c9ccc7c8bf7f87db73db22d0e8c3015b55f0d059868a94d77"} Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.479568 5010 generic.go:334] "Generic (PLEG): container finished" podID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerID="a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2" exitCode=0 Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.479586 5010 generic.go:334] "Generic (PLEG): container finished" podID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerID="5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90" exitCode=143 Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.480475 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.481449 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b74f3f9-8fe4-4d82-a353-694e050fd27c","Type":"ContainerDied","Data":"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2"} Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.481502 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b74f3f9-8fe4-4d82-a353-694e050fd27c","Type":"ContainerDied","Data":"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90"} Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.481518 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b74f3f9-8fe4-4d82-a353-694e050fd27c","Type":"ContainerDied","Data":"d907729982cbaf5db23cc36d90eda27aae482a4761cf699d09e6c1a601b62d23"} Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.481536 5010 scope.go:117] "RemoveContainer" containerID="a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.554821 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.566616 5010 scope.go:117] "RemoveContainer" containerID="5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.567671 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.578665 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:11 crc kubenswrapper[5010]: E1126 15:51:11.579253 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-metadata" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.579280 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-metadata" Nov 26 15:51:11 crc kubenswrapper[5010]: E1126 15:51:11.579314 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-log" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.579324 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-log" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.579565 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" 
containerName="nova-metadata-log" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.579590 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" containerName="nova-metadata-metadata" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.580909 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.585522 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.585544 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.585938 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.609535 5010 scope.go:117] "RemoveContainer" containerID="a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2" Nov 26 15:51:11 crc kubenswrapper[5010]: E1126 15:51:11.610150 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2\": container with ID starting with a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2 not found: ID does not exist" containerID="a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.610187 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2"} err="failed to get container status \"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2\": rpc error: code = NotFound desc = could not find container \"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2\": container with ID starting with a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2 not found: ID does not exist" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.610214 5010 scope.go:117] "RemoveContainer" containerID="5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90" Nov 26 15:51:11 crc kubenswrapper[5010]: E1126 15:51:11.610908 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90\": container with ID starting with 5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90 not found: ID does not exist" containerID="5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.610948 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90"} err="failed to get container status \"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90\": rpc error: code = NotFound desc = could not find container \"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90\": container with ID starting with 5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90 not found: ID does not exist" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.610974 5010 scope.go:117] "RemoveContainer" 
containerID="a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.613293 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2"} err="failed to get container status \"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2\": rpc error: code = NotFound desc = could not find container \"a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2\": container with ID starting with a46123091c67e99e4a5502207b185e3fbf99d252734f545d75f694d877e4abf2 not found: ID does not exist" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.613323 5010 scope.go:117] "RemoveContainer" containerID="5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.613698 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90"} err="failed to get container status \"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90\": rpc error: code = NotFound desc = could not find container \"5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90\": container with ID starting with 5946d906ced17e4b1bb178d5d0c38480d265c87667272d0850a276853b0aef90 not found: ID does not exist" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.634765 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.635078 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/107d20b7-bdc0-467f-a530-a6ed85ecf258-logs\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.635211 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.635256 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7t28\" (UniqueName: \"kubernetes.io/projected/107d20b7-bdc0-467f-a530-a6ed85ecf258-kube-api-access-k7t28\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.635283 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-config-data\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.737217 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/107d20b7-bdc0-467f-a530-a6ed85ecf258-logs\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.737288 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.737317 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7t28\" (UniqueName: \"kubernetes.io/projected/107d20b7-bdc0-467f-a530-a6ed85ecf258-kube-api-access-k7t28\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.737335 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-config-data\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.737392 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.737748 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/107d20b7-bdc0-467f-a530-a6ed85ecf258-logs\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.744198 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-config-data\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.747084 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.747378 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.764781 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7t28\" (UniqueName: \"kubernetes.io/projected/107d20b7-bdc0-467f-a530-a6ed85ecf258-kube-api-access-k7t28\") pod \"nova-metadata-0\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " pod="openstack/nova-metadata-0" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.906166 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="2b74f3f9-8fe4-4d82-a353-694e050fd27c" path="/var/lib/kubelet/pods/2b74f3f9-8fe4-4d82-a353-694e050fd27c/volumes" Nov 26 15:51:11 crc kubenswrapper[5010]: I1126 15:51:11.915308 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:51:12 crc kubenswrapper[5010]: I1126 15:51:12.364112 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:12 crc kubenswrapper[5010]: W1126 15:51:12.367899 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod107d20b7_bdc0_467f_a530_a6ed85ecf258.slice/crio-b03bf94ac2fe082918b042b25c4cd45bab7c89db2ec7699f547a551f682e4efe WatchSource:0}: Error finding container b03bf94ac2fe082918b042b25c4cd45bab7c89db2ec7699f547a551f682e4efe: Status 404 returned error can't find the container with id b03bf94ac2fe082918b042b25c4cd45bab7c89db2ec7699f547a551f682e4efe Nov 26 15:51:12 crc kubenswrapper[5010]: I1126 15:51:12.497450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"1cfc9265-de84-4047-9e01-69444aa4d9f5","Type":"ContainerStarted","Data":"f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930"} Nov 26 15:51:12 crc kubenswrapper[5010]: I1126 15:51:12.497575 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:12 crc kubenswrapper[5010]: I1126 15:51:12.500119 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" containerName="nova-scheduler-scheduler" containerID="cri-o://84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a" gracePeriod=30 Nov 26 15:51:12 crc kubenswrapper[5010]: I1126 15:51:12.500492 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"107d20b7-bdc0-467f-a530-a6ed85ecf258","Type":"ContainerStarted","Data":"b03bf94ac2fe082918b042b25c4cd45bab7c89db2ec7699f547a551f682e4efe"} Nov 26 15:51:12 crc kubenswrapper[5010]: I1126 15:51:12.519080 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.519060498 podStartE2EDuration="2.519060498s" podCreationTimestamp="2025-11-26 15:51:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:12.517528419 +0000 UTC m=+1493.308245607" watchObservedRunningTime="2025-11-26 15:51:12.519060498 +0000 UTC m=+1493.309777636" Nov 26 15:51:13 crc kubenswrapper[5010]: I1126 15:51:13.542492 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"107d20b7-bdc0-467f-a530-a6ed85ecf258","Type":"ContainerStarted","Data":"40773634bcf0dbfe66732e09a3774b69fe16ab2fa9cf86335c7a911522a33022"} Nov 26 15:51:13 crc kubenswrapper[5010]: I1126 15:51:13.542842 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"107d20b7-bdc0-467f-a530-a6ed85ecf258","Type":"ContainerStarted","Data":"616f09e91a853fa7f48dc2772ca9ce83cdcd59f906f1c5dc972742151702135d"} Nov 26 15:51:13 crc kubenswrapper[5010]: I1126 15:51:13.572749 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.572732105 podStartE2EDuration="2.572732105s" 
podCreationTimestamp="2025-11-26 15:51:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:13.56693566 +0000 UTC m=+1494.357652808" watchObservedRunningTime="2025-11-26 15:51:13.572732105 +0000 UTC m=+1494.363449263" Nov 26 15:51:13 crc kubenswrapper[5010]: E1126 15:51:13.588288 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:51:13 crc kubenswrapper[5010]: E1126 15:51:13.589875 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:51:13 crc kubenswrapper[5010]: E1126 15:51:13.591561 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:51:13 crc kubenswrapper[5010]: E1126 15:51:13.591683 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" containerName="nova-scheduler-scheduler" Nov 26 15:51:13 crc kubenswrapper[5010]: I1126 15:51:13.940949 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:51:14 crc kubenswrapper[5010]: I1126 15:51:14.015474 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b84d979b9-x2vg9"] Nov 26 15:51:14 crc kubenswrapper[5010]: I1126 15:51:14.015768 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="dnsmasq-dns" containerID="cri-o://21ca72119b84346f83405f5a18e259bae7b196004fd281f29d68a5586931f253" gracePeriod=10 Nov 26 15:51:14 crc kubenswrapper[5010]: I1126 15:51:14.397000 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 15:51:14 crc kubenswrapper[5010]: I1126 15:51:14.557554 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerID="21ca72119b84346f83405f5a18e259bae7b196004fd281f29d68a5586931f253" exitCode=0 Nov 26 15:51:14 crc kubenswrapper[5010]: I1126 15:51:14.557661 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" event={"ID":"b4cd6af5-1fd1-450c-b157-009b6c25f21c","Type":"ContainerDied","Data":"21ca72119b84346f83405f5a18e259bae7b196004fd281f29d68a5586931f253"} Nov 26 15:51:15 crc kubenswrapper[5010]: I1126 15:51:15.300284 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="dnsmasq-dns" 
probeResult="failure" output="dial tcp 10.217.0.167:5353: connect: connection refused" Nov 26 15:51:15 crc kubenswrapper[5010]: I1126 15:51:15.569899 5010 generic.go:334] "Generic (PLEG): container finished" podID="e508c27c-9334-411e-9861-5be1672da574" containerID="969350a6fc0589ee4b7c24fd73c9e62a2fc1df10647d35c52db8cb05f1246d4f" exitCode=0 Nov 26 15:51:15 crc kubenswrapper[5010]: I1126 15:51:15.569946 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e508c27c-9334-411e-9861-5be1672da574","Type":"ContainerDied","Data":"969350a6fc0589ee4b7c24fd73c9e62a2fc1df10647d35c52db8cb05f1246d4f"} Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.323941 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.345282 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9xs6\" (UniqueName: \"kubernetes.io/projected/e508c27c-9334-411e-9861-5be1672da574-kube-api-access-g9xs6\") pod \"e508c27c-9334-411e-9861-5be1672da574\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.345699 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e508c27c-9334-411e-9861-5be1672da574-logs\") pod \"e508c27c-9334-411e-9861-5be1672da574\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.345801 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-combined-ca-bundle\") pod \"e508c27c-9334-411e-9861-5be1672da574\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.345846 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-config-data\") pod \"e508c27c-9334-411e-9861-5be1672da574\" (UID: \"e508c27c-9334-411e-9861-5be1672da574\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.347197 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e508c27c-9334-411e-9861-5be1672da574-logs" (OuterVolumeSpecName: "logs") pod "e508c27c-9334-411e-9861-5be1672da574" (UID: "e508c27c-9334-411e-9861-5be1672da574"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.355445 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e508c27c-9334-411e-9861-5be1672da574-kube-api-access-g9xs6" (OuterVolumeSpecName: "kube-api-access-g9xs6") pod "e508c27c-9334-411e-9861-5be1672da574" (UID: "e508c27c-9334-411e-9861-5be1672da574"). InnerVolumeSpecName "kube-api-access-g9xs6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.380557 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-config-data" (OuterVolumeSpecName: "config-data") pod "e508c27c-9334-411e-9861-5be1672da574" (UID: "e508c27c-9334-411e-9861-5be1672da574"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.386902 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e508c27c-9334-411e-9861-5be1672da574" (UID: "e508c27c-9334-411e-9861-5be1672da574"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.401539 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.447675 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-svc\") pod \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.447739 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-nb\") pod \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.447786 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-swift-storage-0\") pod \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.447937 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-scjh8\" (UniqueName: \"kubernetes.io/projected/b4cd6af5-1fd1-450c-b157-009b6c25f21c-kube-api-access-scjh8\") pod \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.447975 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-sb\") pod \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.448077 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-config\") pod \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\" (UID: \"b4cd6af5-1fd1-450c-b157-009b6c25f21c\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.448480 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9xs6\" (UniqueName: \"kubernetes.io/projected/e508c27c-9334-411e-9861-5be1672da574-kube-api-access-g9xs6\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.448502 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e508c27c-9334-411e-9861-5be1672da574-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.448512 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.448520 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e508c27c-9334-411e-9861-5be1672da574-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.461921 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4cd6af5-1fd1-450c-b157-009b6c25f21c-kube-api-access-scjh8" (OuterVolumeSpecName: "kube-api-access-scjh8") pod "b4cd6af5-1fd1-450c-b157-009b6c25f21c" (UID: "b4cd6af5-1fd1-450c-b157-009b6c25f21c"). InnerVolumeSpecName "kube-api-access-scjh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.498501 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-config" (OuterVolumeSpecName: "config") pod "b4cd6af5-1fd1-450c-b157-009b6c25f21c" (UID: "b4cd6af5-1fd1-450c-b157-009b6c25f21c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.504099 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b4cd6af5-1fd1-450c-b157-009b6c25f21c" (UID: "b4cd6af5-1fd1-450c-b157-009b6c25f21c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.504151 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b4cd6af5-1fd1-450c-b157-009b6c25f21c" (UID: "b4cd6af5-1fd1-450c-b157-009b6c25f21c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.533883 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b4cd6af5-1fd1-450c-b157-009b6c25f21c" (UID: "b4cd6af5-1fd1-450c-b157-009b6c25f21c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.539133 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b4cd6af5-1fd1-450c-b157-009b6c25f21c" (UID: "b4cd6af5-1fd1-450c-b157-009b6c25f21c"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.550064 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-scjh8\" (UniqueName: \"kubernetes.io/projected/b4cd6af5-1fd1-450c-b157-009b6c25f21c-kube-api-access-scjh8\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.550092 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.550106 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.550114 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.550124 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.550133 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b4cd6af5-1fd1-450c-b157-009b6c25f21c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.592584 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" event={"ID":"b4cd6af5-1fd1-450c-b157-009b6c25f21c","Type":"ContainerDied","Data":"80e2c8578314b28a1542f5b8c39382694a83f68d39d42d4255e9aaab47e70039"} Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.592635 5010 scope.go:117] "RemoveContainer" containerID="21ca72119b84346f83405f5a18e259bae7b196004fd281f29d68a5586931f253" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.592775 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7b84d979b9-x2vg9" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.599629 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerStarted","Data":"63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32"} Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.611353 5010 generic.go:334] "Generic (PLEG): container finished" podID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" containerID="84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a" exitCode=0 Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.611433 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bb3c6b5-4b48-4354-bf61-2a9e4a250669","Type":"ContainerDied","Data":"84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a"} Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.613910 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e508c27c-9334-411e-9861-5be1672da574","Type":"ContainerDied","Data":"bd045f2b0970c5f8f13a1264775c6c0922966638c19f62efd5dd8bf35a470426"} Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.613987 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.638684 5010 scope.go:117] "RemoveContainer" containerID="bf4328bbab9dff42cb5204482ada1785957884bbeb2b0075628ff5c5b9b9a32d" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.686142 5010 scope.go:117] "RemoveContainer" containerID="969350a6fc0589ee4b7c24fd73c9e62a2fc1df10647d35c52db8cb05f1246d4f" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.704302 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7b84d979b9-x2vg9"] Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.718049 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7b84d979b9-x2vg9"] Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.727027 5010 scope.go:117] "RemoveContainer" containerID="daced953b28d667c9ccc7c8bf7f87db73db22d0e8c3015b55f0d059868a94d77" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.727036 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.735548 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.742196 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:16 crc kubenswrapper[5010]: E1126 15:51:16.742771 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="init" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.742789 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="init" Nov 26 15:51:16 crc kubenswrapper[5010]: E1126 15:51:16.742821 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-api" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.742830 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-api" Nov 26 15:51:16 crc kubenswrapper[5010]: E1126 15:51:16.742843 5010 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="dnsmasq-dns" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.742850 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="dnsmasq-dns" Nov 26 15:51:16 crc kubenswrapper[5010]: E1126 15:51:16.742875 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-log" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.742881 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-log" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.743073 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" containerName="dnsmasq-dns" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.743103 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-log" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.743111 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e508c27c-9334-411e-9861-5be1672da574" containerName="nova-api-api" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.744196 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.759299 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.805603 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.811065 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.876902 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c7xsh\" (UniqueName: \"kubernetes.io/projected/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-kube-api-access-c7xsh\") pod \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.877256 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-config-data\") pod \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.877396 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-combined-ca-bundle\") pod \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\" (UID: \"3bb3c6b5-4b48-4354-bf61-2a9e4a250669\") " Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.877747 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfswk\" (UniqueName: \"kubernetes.io/projected/2c5dfa3f-21c4-461f-8f64-d3c7541859da-kube-api-access-sfswk\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.877803 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.877845 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c5dfa3f-21c4-461f-8f64-d3c7541859da-logs\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.877886 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-config-data\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.883877 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-kube-api-access-c7xsh" (OuterVolumeSpecName: "kube-api-access-c7xsh") pod "3bb3c6b5-4b48-4354-bf61-2a9e4a250669" (UID: "3bb3c6b5-4b48-4354-bf61-2a9e4a250669"). InnerVolumeSpecName "kube-api-access-c7xsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.910442 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-config-data" (OuterVolumeSpecName: "config-data") pod "3bb3c6b5-4b48-4354-bf61-2a9e4a250669" (UID: "3bb3c6b5-4b48-4354-bf61-2a9e4a250669"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.916834 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.916917 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.927952 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3bb3c6b5-4b48-4354-bf61-2a9e4a250669" (UID: "3bb3c6b5-4b48-4354-bf61-2a9e4a250669"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.980667 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfswk\" (UniqueName: \"kubernetes.io/projected/2c5dfa3f-21c4-461f-8f64-d3c7541859da-kube-api-access-sfswk\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.980792 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.980847 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c5dfa3f-21c4-461f-8f64-d3c7541859da-logs\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.980943 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-config-data\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.981078 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c7xsh\" (UniqueName: \"kubernetes.io/projected/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-kube-api-access-c7xsh\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.981094 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.981105 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb3c6b5-4b48-4354-bf61-2a9e4a250669-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.983318 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c5dfa3f-21c4-461f-8f64-d3c7541859da-logs\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.986077 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-config-data\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:16 crc kubenswrapper[5010]: I1126 15:51:16.987638 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.005353 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfswk\" (UniqueName: \"kubernetes.io/projected/2c5dfa3f-21c4-461f-8f64-d3c7541859da-kube-api-access-sfswk\") pod 
\"nova-api-0\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " pod="openstack/nova-api-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.088951 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.589108 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:17 crc kubenswrapper[5010]: W1126 15:51:17.603636 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c5dfa3f_21c4_461f_8f64_d3c7541859da.slice/crio-fcae1108e6e29225435f24a065ca3eefae6aa09d7bc2b4b315862a95d86f3913 WatchSource:0}: Error finding container fcae1108e6e29225435f24a065ca3eefae6aa09d7bc2b4b315862a95d86f3913: Status 404 returned error can't find the container with id fcae1108e6e29225435f24a065ca3eefae6aa09d7bc2b4b315862a95d86f3913 Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.646149 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.646163 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"3bb3c6b5-4b48-4354-bf61-2a9e4a250669","Type":"ContainerDied","Data":"05e35e406f855c827847648686b63c3dd5ea7ac140da35ed81f36b185989f588"} Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.646260 5010 scope.go:117] "RemoveContainer" containerID="84d2d55593bfadd13249bdfe40170d7734e5802897e69a14a173897de3ab8f4a" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.648409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c5dfa3f-21c4-461f-8f64-d3c7541859da","Type":"ContainerStarted","Data":"fcae1108e6e29225435f24a065ca3eefae6aa09d7bc2b4b315862a95d86f3913"} Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.654502 5010 generic.go:334] "Generic (PLEG): container finished" podID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerID="63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32" exitCode=0 Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.654551 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerDied","Data":"63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32"} Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.712628 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.729353 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.739168 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:17 crc kubenswrapper[5010]: E1126 15:51:17.739703 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" containerName="nova-scheduler-scheduler" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.739730 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" containerName="nova-scheduler-scheduler" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.739910 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" 
containerName="nova-scheduler-scheduler" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.740676 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.743031 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.750370 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.802082 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.802192 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-config-data\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.802270 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2d28\" (UniqueName: \"kubernetes.io/projected/94f927cc-9f34-4ba3-b122-a9be64300828-kube-api-access-b2d28\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.904082 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.904214 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-config-data\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.904309 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2d28\" (UniqueName: \"kubernetes.io/projected/94f927cc-9f34-4ba3-b122-a9be64300828-kube-api-access-b2d28\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.904659 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3bb3c6b5-4b48-4354-bf61-2a9e4a250669" path="/var/lib/kubelet/pods/3bb3c6b5-4b48-4354-bf61-2a9e4a250669/volumes" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.905914 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4cd6af5-1fd1-450c-b157-009b6c25f21c" path="/var/lib/kubelet/pods/b4cd6af5-1fd1-450c-b157-009b6c25f21c/volumes" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.907107 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e508c27c-9334-411e-9861-5be1672da574" path="/var/lib/kubelet/pods/e508c27c-9334-411e-9861-5be1672da574/volumes" 
Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.910624 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-config-data\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.910630 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:17 crc kubenswrapper[5010]: I1126 15:51:17.920416 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2d28\" (UniqueName: \"kubernetes.io/projected/94f927cc-9f34-4ba3-b122-a9be64300828-kube-api-access-b2d28\") pod \"nova-scheduler-0\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " pod="openstack/nova-scheduler-0" Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.066048 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.296350 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.388631 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8gn7x"] Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.505106 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.505846 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" containerName="kube-state-metrics" containerID="cri-o://974eb392789b3575b6ab91660a9720a82245412b282bb7c530b30d8feee0d0df" gracePeriod=30 Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.565114 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.666407 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"94f927cc-9f34-4ba3-b122-a9be64300828","Type":"ContainerStarted","Data":"ee2537137cefc2526c532ae5b3a701444d28b884850a6ab7011b4edd287fc923"} Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.668902 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerStarted","Data":"621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a"} Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.690316 5010 generic.go:334] "Generic (PLEG): container finished" podID="65356e91-f417-4d3c-8298-cd16cd182fea" containerID="974eb392789b3575b6ab91660a9720a82245412b282bb7c530b30d8feee0d0df" exitCode=2 Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.690413 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65356e91-f417-4d3c-8298-cd16cd182fea","Type":"ContainerDied","Data":"974eb392789b3575b6ab91660a9720a82245412b282bb7c530b30d8feee0d0df"} Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.693315 5010 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8gn7x" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="registry-server" containerID="cri-o://5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145" gracePeriod=2 Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.693592 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c5dfa3f-21c4-461f-8f64-d3c7541859da","Type":"ContainerStarted","Data":"7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48"} Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.693647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c5dfa3f-21c4-461f-8f64-d3c7541859da","Type":"ContainerStarted","Data":"97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0"} Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.704796 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-csc9n" podStartSLOduration=2.84533221 podStartE2EDuration="10.704772882s" podCreationTimestamp="2025-11-26 15:51:08 +0000 UTC" firstStartedPulling="2025-11-26 15:51:10.466450773 +0000 UTC m=+1491.257167921" lastFinishedPulling="2025-11-26 15:51:18.325891445 +0000 UTC m=+1499.116608593" observedRunningTime="2025-11-26 15:51:18.692641408 +0000 UTC m=+1499.483358556" watchObservedRunningTime="2025-11-26 15:51:18.704772882 +0000 UTC m=+1499.495490050" Nov 26 15:51:18 crc kubenswrapper[5010]: I1126 15:51:18.716196 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.7161751069999998 podStartE2EDuration="2.716175107s" podCreationTimestamp="2025-11-26 15:51:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:18.714155626 +0000 UTC m=+1499.504872784" watchObservedRunningTime="2025-11-26 15:51:18.716175107 +0000 UTC m=+1499.506892265" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.058584 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.138015 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9lz4\" (UniqueName: \"kubernetes.io/projected/65356e91-f417-4d3c-8298-cd16cd182fea-kube-api-access-s9lz4\") pod \"65356e91-f417-4d3c-8298-cd16cd182fea\" (UID: \"65356e91-f417-4d3c-8298-cd16cd182fea\") " Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.158059 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65356e91-f417-4d3c-8298-cd16cd182fea-kube-api-access-s9lz4" (OuterVolumeSpecName: "kube-api-access-s9lz4") pod "65356e91-f417-4d3c-8298-cd16cd182fea" (UID: "65356e91-f417-4d3c-8298-cd16cd182fea"). InnerVolumeSpecName "kube-api-access-s9lz4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.242826 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9lz4\" (UniqueName: \"kubernetes.io/projected/65356e91-f417-4d3c-8298-cd16cd182fea-kube-api-access-s9lz4\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.266613 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.343925 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-catalog-content\") pod \"15e0c240-1511-4902-9893-a3f9dd146c8a\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.343987 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/15e0c240-1511-4902-9893-a3f9dd146c8a-kube-api-access-r8zs8\") pod \"15e0c240-1511-4902-9893-a3f9dd146c8a\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.344285 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-utilities\") pod \"15e0c240-1511-4902-9893-a3f9dd146c8a\" (UID: \"15e0c240-1511-4902-9893-a3f9dd146c8a\") " Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.345210 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-utilities" (OuterVolumeSpecName: "utilities") pod "15e0c240-1511-4902-9893-a3f9dd146c8a" (UID: "15e0c240-1511-4902-9893-a3f9dd146c8a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.355069 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/15e0c240-1511-4902-9893-a3f9dd146c8a-kube-api-access-r8zs8" (OuterVolumeSpecName: "kube-api-access-r8zs8") pod "15e0c240-1511-4902-9893-a3f9dd146c8a" (UID: "15e0c240-1511-4902-9893-a3f9dd146c8a"). InnerVolumeSpecName "kube-api-access-r8zs8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.359272 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "15e0c240-1511-4902-9893-a3f9dd146c8a" (UID: "15e0c240-1511-4902-9893-a3f9dd146c8a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.447098 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.447130 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r8zs8\" (UniqueName: \"kubernetes.io/projected/15e0c240-1511-4902-9893-a3f9dd146c8a-kube-api-access-r8zs8\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.447142 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/15e0c240-1511-4902-9893-a3f9dd146c8a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.705938 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"65356e91-f417-4d3c-8298-cd16cd182fea","Type":"ContainerDied","Data":"50eddd0272617a2bdb446683f931db808bc6755f4e13c7add8fde594afd09c44"} Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.705969 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.706003 5010 scope.go:117] "RemoveContainer" containerID="974eb392789b3575b6ab91660a9720a82245412b282bb7c530b30d8feee0d0df" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.710219 5010 generic.go:334] "Generic (PLEG): container finished" podID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerID="5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145" exitCode=0 Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.710301 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerDied","Data":"5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145"} Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.710354 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8gn7x" event={"ID":"15e0c240-1511-4902-9893-a3f9dd146c8a","Type":"ContainerDied","Data":"c6f5b7101fb817b6fa0c009740cf75cba79fcf9b46703077e9eb4446f3c34a53"} Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.710365 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8gn7x" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.716234 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"94f927cc-9f34-4ba3-b122-a9be64300828","Type":"ContainerStarted","Data":"9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769"} Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.733043 5010 scope.go:117] "RemoveContainer" containerID="5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.742625 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.742599233 podStartE2EDuration="2.742599233s" podCreationTimestamp="2025-11-26 15:51:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:19.738652274 +0000 UTC m=+1500.529369422" watchObservedRunningTime="2025-11-26 15:51:19.742599233 +0000 UTC m=+1500.533316391" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.766161 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8gn7x"] Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.768001 5010 scope.go:117] "RemoveContainer" containerID="1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.781769 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8gn7x"] Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.791274 5010 scope.go:117] "RemoveContainer" containerID="6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.796489 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.814109 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.822424 5010 scope.go:117] "RemoveContainer" containerID="5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.822528 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.823015 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="registry-server" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.823034 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="registry-server" Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.823047 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" containerName="kube-state-metrics" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.823065 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" containerName="kube-state-metrics" Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.823078 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="extract-content" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.823083 5010 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="extract-content" Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.823114 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="extract-utilities" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.823122 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="extract-utilities" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.823341 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" containerName="kube-state-metrics" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.823372 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" containerName="registry-server" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.824107 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.825142 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145\": container with ID starting with 5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145 not found: ID does not exist" containerID="5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.825172 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145"} err="failed to get container status \"5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145\": rpc error: code = NotFound desc = could not find container \"5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145\": container with ID starting with 5d9c69f48fc0b684e961d11f94abf9475479c82e0431f836deb3ac0a5525e145 not found: ID does not exist" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.825194 5010 scope.go:117] "RemoveContainer" containerID="1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.828457 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.828473 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.828843 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107\": container with ID starting with 1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107 not found: ID does not exist" containerID="1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.828885 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107"} err="failed to get container status \"1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107\": rpc error: code = NotFound desc = could not find container 
\"1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107\": container with ID starting with 1d67280c24e4f7986073a61268821143d62015cea4d67fbd08c98da681fb9107 not found: ID does not exist" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.828915 5010 scope.go:117] "RemoveContainer" containerID="6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373" Nov 26 15:51:19 crc kubenswrapper[5010]: E1126 15:51:19.829690 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373\": container with ID starting with 6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373 not found: ID does not exist" containerID="6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.829750 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373"} err="failed to get container status \"6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373\": rpc error: code = NotFound desc = could not find container \"6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373\": container with ID starting with 6016a84d0eb9c4c0ccecfc6a368e43402ef8715792834ac8899ab5541a2bd373 not found: ID does not exist" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.831123 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.906292 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="15e0c240-1511-4902-9893-a3f9dd146c8a" path="/var/lib/kubelet/pods/15e0c240-1511-4902-9893-a3f9dd146c8a/volumes" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.906923 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65356e91-f417-4d3c-8298-cd16cd182fea" path="/var/lib/kubelet/pods/65356e91-f417-4d3c-8298-cd16cd182fea/volumes" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.956276 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.956465 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.956509 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pdvl\" (UniqueName: \"kubernetes.io/projected/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-api-access-9pdvl\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:19 crc kubenswrapper[5010]: I1126 15:51:19.956552 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: 
\"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.057891 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.058030 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.058072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pdvl\" (UniqueName: \"kubernetes.io/projected/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-api-access-9pdvl\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.058122 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.061503 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.069100 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.069183 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.073399 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pdvl\" (UniqueName: \"kubernetes.io/projected/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-api-access-9pdvl\") pod \"kube-state-metrics-0\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.150827 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.625938 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.626959 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-central-agent" containerID="cri-o://be84644312daa0eeaad435cd97f4bf9e69ce0fec67353d644d52be09ca1dda14" gracePeriod=30 Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.627179 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="proxy-httpd" containerID="cri-o://d54a2b4407680a0f0c71ee70b5d943e7422cb44f2affa01809599bc613c10966" gracePeriod=30 Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.627323 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="sg-core" containerID="cri-o://2b805e0f105e16a084cd77b2e2d7a3e9d50e1467ddbfd960127ae1af4d8e50b6" gracePeriod=30 Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.627322 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-notification-agent" containerID="cri-o://fec477bfab8c2e30ff3f25a68c1456490f0eea9398ae0047823a6fb6369ffbd8" gracePeriod=30 Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.667150 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:51:20 crc kubenswrapper[5010]: W1126 15:51:20.673383 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded209eb8_b2b9_4101_9eda_2762259ea2cd.slice/crio-1fde3fb5e760b57b7690e7599878dc5640e2635c090d3aeb394a3d92b3322e48 WatchSource:0}: Error finding container 1fde3fb5e760b57b7690e7599878dc5640e2635c090d3aeb394a3d92b3322e48: Status 404 returned error can't find the container with id 1fde3fb5e760b57b7690e7599878dc5640e2635c090d3aeb394a3d92b3322e48 Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.730725 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ed209eb8-b2b9-4101-9eda-2762259ea2cd","Type":"ContainerStarted","Data":"1fde3fb5e760b57b7690e7599878dc5640e2635c090d3aeb394a3d92b3322e48"} Nov 26 15:51:20 crc kubenswrapper[5010]: I1126 15:51:20.887898 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.750431 5010 generic.go:334] "Generic (PLEG): container finished" podID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerID="d54a2b4407680a0f0c71ee70b5d943e7422cb44f2affa01809599bc613c10966" exitCode=0 Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.750772 5010 generic.go:334] "Generic (PLEG): container finished" podID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerID="2b805e0f105e16a084cd77b2e2d7a3e9d50e1467ddbfd960127ae1af4d8e50b6" exitCode=2 Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.750787 5010 generic.go:334] "Generic (PLEG): container finished" podID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerID="be84644312daa0eeaad435cd97f4bf9e69ce0fec67353d644d52be09ca1dda14" 
exitCode=0 Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.750699 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerDied","Data":"d54a2b4407680a0f0c71ee70b5d943e7422cb44f2affa01809599bc613c10966"} Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.750894 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerDied","Data":"2b805e0f105e16a084cd77b2e2d7a3e9d50e1467ddbfd960127ae1af4d8e50b6"} Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.750917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerDied","Data":"be84644312daa0eeaad435cd97f4bf9e69ce0fec67353d644d52be09ca1dda14"} Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.753688 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ed209eb8-b2b9-4101-9eda-2762259ea2cd","Type":"ContainerStarted","Data":"1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56"} Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.755362 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.787961 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.393978383 podStartE2EDuration="2.787936717s" podCreationTimestamp="2025-11-26 15:51:19 +0000 UTC" firstStartedPulling="2025-11-26 15:51:20.677653364 +0000 UTC m=+1501.468370512" lastFinishedPulling="2025-11-26 15:51:21.071611698 +0000 UTC m=+1501.862328846" observedRunningTime="2025-11-26 15:51:21.776528932 +0000 UTC m=+1502.567246090" watchObservedRunningTime="2025-11-26 15:51:21.787936717 +0000 UTC m=+1502.578653875" Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.916377 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 15:51:21 crc kubenswrapper[5010]: I1126 15:51:21.916441 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 15:51:22 crc kubenswrapper[5010]: I1126 15:51:22.932120 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:22 crc kubenswrapper[5010]: I1126 15:51:22.932149 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:23 crc kubenswrapper[5010]: I1126 15:51:23.066296 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.791006 5010 generic.go:334] "Generic (PLEG): container finished" podID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerID="fec477bfab8c2e30ff3f25a68c1456490f0eea9398ae0047823a6fb6369ffbd8" exitCode=0 Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 
15:51:24.791053 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerDied","Data":"fec477bfab8c2e30ff3f25a68c1456490f0eea9398ae0047823a6fb6369ffbd8"} Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.878357 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.956739 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-config-data\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.956914 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kktb5\" (UniqueName: \"kubernetes.io/projected/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-kube-api-access-kktb5\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.957023 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-run-httpd\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.957056 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-log-httpd\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.957118 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-combined-ca-bundle\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.957218 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-scripts\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.957262 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-sg-core-conf-yaml\") pod \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\" (UID: \"1db39e6e-dc82-4ded-8c69-37ba0746ab3e\") " Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.966052 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.967926 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.973550 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-kube-api-access-kktb5" (OuterVolumeSpecName: "kube-api-access-kktb5") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "kube-api-access-kktb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.988106 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-scripts" (OuterVolumeSpecName: "scripts") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:24 crc kubenswrapper[5010]: I1126 15:51:24.999237 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.062248 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kktb5\" (UniqueName: \"kubernetes.io/projected/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-kube-api-access-kktb5\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.062290 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.062306 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.062322 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.062336 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.079178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.079777 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-config-data" (OuterVolumeSpecName: "config-data") pod "1db39e6e-dc82-4ded-8c69-37ba0746ab3e" (UID: "1db39e6e-dc82-4ded-8c69-37ba0746ab3e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.164513 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.164575 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1db39e6e-dc82-4ded-8c69-37ba0746ab3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.807116 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1db39e6e-dc82-4ded-8c69-37ba0746ab3e","Type":"ContainerDied","Data":"0bc9cb14f1a778f2264cd2f64d82fa14267ec51371703b737b87a440853ea58f"} Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.807209 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.807496 5010 scope.go:117] "RemoveContainer" containerID="d54a2b4407680a0f0c71ee70b5d943e7422cb44f2affa01809599bc613c10966" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.843423 5010 scope.go:117] "RemoveContainer" containerID="2b805e0f105e16a084cd77b2e2d7a3e9d50e1467ddbfd960127ae1af4d8e50b6" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.848702 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.863193 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.874473 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:25 crc kubenswrapper[5010]: E1126 15:51:25.875032 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="sg-core" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.875051 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="sg-core" Nov 26 15:51:25 crc kubenswrapper[5010]: E1126 15:51:25.875070 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-central-agent" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.875080 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-central-agent" Nov 26 15:51:25 crc kubenswrapper[5010]: E1126 15:51:25.875106 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-notification-agent" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.875116 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-notification-agent" Nov 26 15:51:25 crc kubenswrapper[5010]: E1126 
15:51:25.876667 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="proxy-httpd" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.876686 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="proxy-httpd" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.877055 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-notification-agent" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.877076 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="ceilometer-central-agent" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.877106 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="proxy-httpd" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.877122 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" containerName="sg-core" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.881988 5010 scope.go:117] "RemoveContainer" containerID="fec477bfab8c2e30ff3f25a68c1456490f0eea9398ae0047823a6fb6369ffbd8" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.882523 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.884220 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.886019 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.889157 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.894085 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.918137 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1db39e6e-dc82-4ded-8c69-37ba0746ab3e" path="/var/lib/kubelet/pods/1db39e6e-dc82-4ded-8c69-37ba0746ab3e/volumes" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.925455 5010 scope.go:117] "RemoveContainer" containerID="be84644312daa0eeaad435cd97f4bf9e69ce0fec67353d644d52be09ca1dda14" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984038 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-log-httpd\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984095 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-run-httpd\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984137 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-config-data\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984522 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-scripts\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984636 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984703 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984776 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbf55\" (UniqueName: \"kubernetes.io/projected/741560a9-42dc-4344-9875-93eb97870572-kube-api-access-pbf55\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:25 crc kubenswrapper[5010]: I1126 15:51:25.984984 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086680 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-scripts\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086753 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086786 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086817 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbf55\" (UniqueName: \"kubernetes.io/projected/741560a9-42dc-4344-9875-93eb97870572-kube-api-access-pbf55\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 
15:51:26.086873 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086923 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-log-httpd\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086954 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-run-httpd\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.086987 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-config-data\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.087975 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-log-httpd\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.088150 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-run-httpd\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.095009 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.095235 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-scripts\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.095214 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.095967 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.097093 5010 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-config-data\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.121526 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pbf55\" (UniqueName: \"kubernetes.io/projected/741560a9-42dc-4344-9875-93eb97870572-kube-api-access-pbf55\") pod \"ceilometer-0\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.209999 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:26 crc kubenswrapper[5010]: W1126 15:51:26.724664 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod741560a9_42dc_4344_9875_93eb97870572.slice/crio-c982abef584ce9a7e52dfeff89c2f633502fdfb8806ae5fffdbae1aeb6832fa9 WatchSource:0}: Error finding container c982abef584ce9a7e52dfeff89c2f633502fdfb8806ae5fffdbae1aeb6832fa9: Status 404 returned error can't find the container with id c982abef584ce9a7e52dfeff89c2f633502fdfb8806ae5fffdbae1aeb6832fa9 Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.727034 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:26 crc kubenswrapper[5010]: I1126 15:51:26.863164 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerStarted","Data":"c982abef584ce9a7e52dfeff89c2f633502fdfb8806ae5fffdbae1aeb6832fa9"} Nov 26 15:51:27 crc kubenswrapper[5010]: I1126 15:51:27.089429 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:51:27 crc kubenswrapper[5010]: I1126 15:51:27.089813 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.067005 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.097300 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.172075 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.172112 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.572753 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.573189 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:28 crc kubenswrapper[5010]: 
I1126 15:51:28.652900 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.887858 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerStarted","Data":"eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9"} Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.887921 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerStarted","Data":"573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc"} Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.927634 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 15:51:28 crc kubenswrapper[5010]: I1126 15:51:28.954688 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.045070 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.087133 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pn4nw"] Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.088297 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-pn4nw" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="registry-server" containerID="cri-o://9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677" gracePeriod=2 Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.686110 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.788437 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zq74c\" (UniqueName: \"kubernetes.io/projected/22f55318-df99-4764-82aa-2240fea5d0ca-kube-api-access-zq74c\") pod \"22f55318-df99-4764-82aa-2240fea5d0ca\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.788495 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-utilities\") pod \"22f55318-df99-4764-82aa-2240fea5d0ca\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.788694 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-catalog-content\") pod \"22f55318-df99-4764-82aa-2240fea5d0ca\" (UID: \"22f55318-df99-4764-82aa-2240fea5d0ca\") " Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.792515 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22f55318-df99-4764-82aa-2240fea5d0ca-kube-api-access-zq74c" (OuterVolumeSpecName: "kube-api-access-zq74c") pod "22f55318-df99-4764-82aa-2240fea5d0ca" (UID: "22f55318-df99-4764-82aa-2240fea5d0ca"). InnerVolumeSpecName "kube-api-access-zq74c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.792929 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-utilities" (OuterVolumeSpecName: "utilities") pod "22f55318-df99-4764-82aa-2240fea5d0ca" (UID: "22f55318-df99-4764-82aa-2240fea5d0ca"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.851350 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "22f55318-df99-4764-82aa-2240fea5d0ca" (UID: "22f55318-df99-4764-82aa-2240fea5d0ca"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.892542 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.892583 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zq74c\" (UniqueName: \"kubernetes.io/projected/22f55318-df99-4764-82aa-2240fea5d0ca-kube-api-access-zq74c\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.892597 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22f55318-df99-4764-82aa-2240fea5d0ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.902519 5010 generic.go:334] "Generic (PLEG): container finished" podID="22f55318-df99-4764-82aa-2240fea5d0ca" containerID="9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677" exitCode=0 Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.902671 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-pn4nw" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.912185 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pn4nw" event={"ID":"22f55318-df99-4764-82aa-2240fea5d0ca","Type":"ContainerDied","Data":"9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677"} Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.912241 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-pn4nw" event={"ID":"22f55318-df99-4764-82aa-2240fea5d0ca","Type":"ContainerDied","Data":"7c5132ca3d8968373b65bafb1e9ae6ad1e8f8ccda975d5afa499f61ac81ba390"} Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.912257 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerStarted","Data":"4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21"} Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.912283 5010 scope.go:117] "RemoveContainer" containerID="9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.932858 5010 scope.go:117] "RemoveContainer" containerID="35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.976351 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-pn4nw"] Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.984890 5010 scope.go:117] "RemoveContainer" containerID="b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a" Nov 26 15:51:29 crc kubenswrapper[5010]: I1126 15:51:29.991337 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-pn4nw"] Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.029520 5010 scope.go:117] "RemoveContainer" containerID="9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677" Nov 26 15:51:30 crc kubenswrapper[5010]: E1126 15:51:30.030071 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677\": container with ID starting with 9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677 not found: ID does not exist" containerID="9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677" Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.030172 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677"} err="failed to get container status \"9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677\": rpc error: code = NotFound desc = could not find container \"9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677\": container with ID starting with 9caf8d1b3f4ff4a94ba7eb2c46db49a9d8593ad8b4fc7f4cd232e4001c9fd677 not found: ID does not exist" Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.030256 5010 scope.go:117] "RemoveContainer" containerID="35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c" Nov 26 15:51:30 crc kubenswrapper[5010]: E1126 15:51:30.030633 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c\": container with ID starting with 35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c not found: ID does not exist" containerID="35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c" Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.030679 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c"} err="failed to get container status \"35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c\": rpc error: code = NotFound desc = could not find container \"35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c\": container with ID starting with 35b3a607a2dfe47ef25342848b1408ecf1f6e29a80ea59f5d7475e8602bfc85c not found: ID does not exist" Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.030726 5010 scope.go:117] "RemoveContainer" containerID="b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a" Nov 26 15:51:30 crc kubenswrapper[5010]: E1126 15:51:30.031097 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a\": container with ID starting with b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a not found: ID does not exist" containerID="b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a" Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.031132 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a"} err="failed to get container status \"b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a\": rpc error: code = NotFound desc = could not find container \"b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a\": container with ID starting with b5852906ef538c66bf1779fbfc87279c331a610eb689b31029c01b1a9114081a not found: ID does not exist" Nov 26 15:51:30 crc kubenswrapper[5010]: I1126 15:51:30.163026 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 15:51:31.912340 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" path="/var/lib/kubelet/pods/22f55318-df99-4764-82aa-2240fea5d0ca/volumes" Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 15:51:31.920734 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 15:51:31.927865 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 15:51:31.932905 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 15:51:31.935920 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerStarted","Data":"f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f"} Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 15:51:31.936279 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 15:51:31 crc kubenswrapper[5010]: I1126 
15:51:31.967176 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.650354194 podStartE2EDuration="6.967153868s" podCreationTimestamp="2025-11-26 15:51:25 +0000 UTC" firstStartedPulling="2025-11-26 15:51:26.727221562 +0000 UTC m=+1507.517938710" lastFinishedPulling="2025-11-26 15:51:31.044021226 +0000 UTC m=+1511.834738384" observedRunningTime="2025-11-26 15:51:31.964005009 +0000 UTC m=+1512.754722177" watchObservedRunningTime="2025-11-26 15:51:31.967153868 +0000 UTC m=+1512.757871016" Nov 26 15:51:32 crc kubenswrapper[5010]: I1126 15:51:32.957226 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.815104 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.928911 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-combined-ca-bundle\") pod \"7416391c-360d-4bcd-9cfa-6977446520ed\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.929482 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-config-data\") pod \"7416391c-360d-4bcd-9cfa-6977446520ed\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.929755 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktl4x\" (UniqueName: \"kubernetes.io/projected/7416391c-360d-4bcd-9cfa-6977446520ed-kube-api-access-ktl4x\") pod \"7416391c-360d-4bcd-9cfa-6977446520ed\" (UID: \"7416391c-360d-4bcd-9cfa-6977446520ed\") " Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.937495 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7416391c-360d-4bcd-9cfa-6977446520ed-kube-api-access-ktl4x" (OuterVolumeSpecName: "kube-api-access-ktl4x") pod "7416391c-360d-4bcd-9cfa-6977446520ed" (UID: "7416391c-360d-4bcd-9cfa-6977446520ed"). InnerVolumeSpecName "kube-api-access-ktl4x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.967575 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-config-data" (OuterVolumeSpecName: "config-data") pod "7416391c-360d-4bcd-9cfa-6977446520ed" (UID: "7416391c-360d-4bcd-9cfa-6977446520ed"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.980241 5010 generic.go:334] "Generic (PLEG): container finished" podID="7416391c-360d-4bcd-9cfa-6977446520ed" containerID="22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389" exitCode=137 Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.980285 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7416391c-360d-4bcd-9cfa-6977446520ed","Type":"ContainerDied","Data":"22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389"} Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.980315 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7416391c-360d-4bcd-9cfa-6977446520ed","Type":"ContainerDied","Data":"978aa98c8f44add19aa0ebe77bceff4a1aed24cba20df0f3983921cd2f7adf68"} Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.980333 5010 scope.go:117] "RemoveContainer" containerID="22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389" Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.980481 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:35 crc kubenswrapper[5010]: I1126 15:51:35.982102 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7416391c-360d-4bcd-9cfa-6977446520ed" (UID: "7416391c-360d-4bcd-9cfa-6977446520ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.033275 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktl4x\" (UniqueName: \"kubernetes.io/projected/7416391c-360d-4bcd-9cfa-6977446520ed-kube-api-access-ktl4x\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.033637 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.033892 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7416391c-360d-4bcd-9cfa-6977446520ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.047096 5010 scope.go:117] "RemoveContainer" containerID="22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389" Nov 26 15:51:36 crc kubenswrapper[5010]: E1126 15:51:36.047603 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389\": container with ID starting with 22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389 not found: ID does not exist" containerID="22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.047638 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389"} err="failed to get container status \"22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389\": rpc error: code = 
NotFound desc = could not find container \"22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389\": container with ID starting with 22c16b27c447f17d3eb8bd44b4ca814276bc886eb1dcd09abbd0959023fe9389 not found: ID does not exist" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.331056 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.349596 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.364003 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:51:36 crc kubenswrapper[5010]: E1126 15:51:36.364562 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="registry-server" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.364585 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="registry-server" Nov 26 15:51:36 crc kubenswrapper[5010]: E1126 15:51:36.364609 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="extract-content" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.364617 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="extract-content" Nov 26 15:51:36 crc kubenswrapper[5010]: E1126 15:51:36.364633 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7416391c-360d-4bcd-9cfa-6977446520ed" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.364641 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7416391c-360d-4bcd-9cfa-6977446520ed" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 15:51:36 crc kubenswrapper[5010]: E1126 15:51:36.364674 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="extract-utilities" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.364684 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="extract-utilities" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.365014 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="22f55318-df99-4764-82aa-2240fea5d0ca" containerName="registry-server" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.365031 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7416391c-360d-4bcd-9cfa-6977446520ed" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.365952 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.369643 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.369766 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.369656 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.375461 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.442500 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.442566 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.442775 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.442905 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.443109 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6cdk\" (UniqueName: \"kubernetes.io/projected/e618fcce-218b-4f09-a0ae-5cad873d9aab-kube-api-access-m6cdk\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.544662 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6cdk\" (UniqueName: \"kubernetes.io/projected/e618fcce-218b-4f09-a0ae-5cad873d9aab-kube-api-access-m6cdk\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.545450 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.545785 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.546282 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.546626 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.552648 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.555296 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.559303 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.560257 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.570375 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6cdk\" (UniqueName: \"kubernetes.io/projected/e618fcce-218b-4f09-a0ae-5cad873d9aab-kube-api-access-m6cdk\") pod \"nova-cell1-novncproxy-0\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:36 crc kubenswrapper[5010]: I1126 15:51:36.696759 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.093193 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.093578 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.093913 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.093968 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.097056 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.097984 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.187169 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:51:37 crc kubenswrapper[5010]: W1126 15:51:37.189111 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode618fcce_218b_4f09_a0ae_5cad873d9aab.slice/crio-d5d1fa08257d3909af44a34f881df9f2a14ad0167e2dcc341fe242a4a0df45dd WatchSource:0}: Error finding container d5d1fa08257d3909af44a34f881df9f2a14ad0167e2dcc341fe242a4a0df45dd: Status 404 returned error can't find the container with id d5d1fa08257d3909af44a34f881df9f2a14ad0167e2dcc341fe242a4a0df45dd Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.307355 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-755bdc5489-xpxxn"] Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.310005 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.341603 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-755bdc5489-xpxxn"] Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.363641 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-config\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.363800 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-sb\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.364619 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-swift-storage-0\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.364669 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-svc\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.364737 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6s675\" (UniqueName: \"kubernetes.io/projected/7e581b31-6b6d-4e32-8775-3446bcf717d9-kube-api-access-6s675\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.364759 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-nb\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.467801 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-swift-storage-0\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.468108 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-svc\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.468163 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-6s675\" (UniqueName: \"kubernetes.io/projected/7e581b31-6b6d-4e32-8775-3446bcf717d9-kube-api-access-6s675\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.468186 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-nb\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.468261 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-config\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.468298 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-sb\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.468728 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-swift-storage-0\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.469530 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-sb\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.471445 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-svc\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.474852 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-nb\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.475769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-config\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.485504 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6s675\" (UniqueName: 
\"kubernetes.io/projected/7e581b31-6b6d-4e32-8775-3446bcf717d9-kube-api-access-6s675\") pod \"dnsmasq-dns-755bdc5489-xpxxn\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:37 crc kubenswrapper[5010]: I1126 15:51:37.646238 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:38 crc kubenswrapper[5010]: I1126 15:51:37.914434 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7416391c-360d-4bcd-9cfa-6977446520ed" path="/var/lib/kubelet/pods/7416391c-360d-4bcd-9cfa-6977446520ed/volumes" Nov 26 15:51:38 crc kubenswrapper[5010]: I1126 15:51:38.021226 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e618fcce-218b-4f09-a0ae-5cad873d9aab","Type":"ContainerStarted","Data":"326a0e69015f09983f8703d9758f4b2d20607b2b6caf77a2247c63a4d0a164fa"} Nov 26 15:51:38 crc kubenswrapper[5010]: I1126 15:51:38.021263 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e618fcce-218b-4f09-a0ae-5cad873d9aab","Type":"ContainerStarted","Data":"d5d1fa08257d3909af44a34f881df9f2a14ad0167e2dcc341fe242a4a0df45dd"} Nov 26 15:51:38 crc kubenswrapper[5010]: I1126 15:51:38.065229 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.065206189 podStartE2EDuration="2.065206189s" podCreationTimestamp="2025-11-26 15:51:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:38.046542683 +0000 UTC m=+1518.837259831" watchObservedRunningTime="2025-11-26 15:51:38.065206189 +0000 UTC m=+1518.855923327" Nov 26 15:51:38 crc kubenswrapper[5010]: W1126 15:51:38.130373 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7e581b31_6b6d_4e32_8775_3446bcf717d9.slice/crio-a74e79376bec79612d98c04fba6caef1dd6b711a147e6145a27e98d5f0838c89 WatchSource:0}: Error finding container a74e79376bec79612d98c04fba6caef1dd6b711a147e6145a27e98d5f0838c89: Status 404 returned error can't find the container with id a74e79376bec79612d98c04fba6caef1dd6b711a147e6145a27e98d5f0838c89 Nov 26 15:51:38 crc kubenswrapper[5010]: I1126 15:51:38.130849 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-755bdc5489-xpxxn"] Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.029888 5010 generic.go:334] "Generic (PLEG): container finished" podID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerID="3721092d2f588508ae5f89654f3575bb94f0470deb70912296aabc4108ca40d2" exitCode=0 Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.030105 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" event={"ID":"7e581b31-6b6d-4e32-8775-3446bcf717d9","Type":"ContainerDied","Data":"3721092d2f588508ae5f89654f3575bb94f0470deb70912296aabc4108ca40d2"} Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.031390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" event={"ID":"7e581b31-6b6d-4e32-8775-3446bcf717d9","Type":"ContainerStarted","Data":"a74e79376bec79612d98c04fba6caef1dd6b711a147e6145a27e98d5f0838c89"} Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.601581 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/ceilometer-0"] Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.602548 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-central-agent" containerID="cri-o://573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc" gracePeriod=30 Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.602738 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="sg-core" containerID="cri-o://4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21" gracePeriod=30 Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.602765 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="proxy-httpd" containerID="cri-o://f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f" gracePeriod=30 Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.602748 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-notification-agent" containerID="cri-o://eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9" gracePeriod=30 Nov 26 15:51:39 crc kubenswrapper[5010]: I1126 15:51:39.985117 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.043965 5010 generic.go:334] "Generic (PLEG): container finished" podID="741560a9-42dc-4344-9875-93eb97870572" containerID="f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f" exitCode=0 Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.044002 5010 generic.go:334] "Generic (PLEG): container finished" podID="741560a9-42dc-4344-9875-93eb97870572" containerID="4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21" exitCode=2 Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.044016 5010 generic.go:334] "Generic (PLEG): container finished" podID="741560a9-42dc-4344-9875-93eb97870572" containerID="573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc" exitCode=0 Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.044056 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerDied","Data":"f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f"} Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.044113 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerDied","Data":"4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21"} Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.044138 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerDied","Data":"573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc"} Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.046335 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-log" 
containerID="cri-o://97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0" gracePeriod=30 Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.047144 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" event={"ID":"7e581b31-6b6d-4e32-8775-3446bcf717d9","Type":"ContainerStarted","Data":"acebe2f9c033b233d36e411c21de634121ada0b8473fd12b8911fcb60f8a4bba"} Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.047272 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-api" containerID="cri-o://7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48" gracePeriod=30 Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.047554 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:40 crc kubenswrapper[5010]: I1126 15:51:40.074385 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" podStartSLOduration=3.074366778 podStartE2EDuration="3.074366778s" podCreationTimestamp="2025-11-26 15:51:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:40.071639889 +0000 UTC m=+1520.862357037" watchObservedRunningTime="2025-11-26 15:51:40.074366778 +0000 UTC m=+1520.865083926" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.061938 5010 generic.go:334] "Generic (PLEG): container finished" podID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerID="97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0" exitCode=143 Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.062007 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c5dfa3f-21c4-461f-8f64-d3c7541859da","Type":"ContainerDied","Data":"97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0"} Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.423025 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.424107 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.424176 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.427806 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.428076 5010 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" gracePeriod=600 Nov 26 15:51:41 crc kubenswrapper[5010]: E1126 15:51:41.566850 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.697135 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.848234 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.988672 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-scripts\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.988783 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-sg-core-conf-yaml\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.988807 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-ceilometer-tls-certs\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.988845 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-log-httpd\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.988962 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbf55\" (UniqueName: \"kubernetes.io/projected/741560a9-42dc-4344-9875-93eb97870572-kube-api-access-pbf55\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.988982 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-config-data\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.989057 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-run-httpd\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: 
\"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.989080 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-combined-ca-bundle\") pod \"741560a9-42dc-4344-9875-93eb97870572\" (UID: \"741560a9-42dc-4344-9875-93eb97870572\") " Nov 26 15:51:41 crc kubenswrapper[5010]: I1126 15:51:41.993013 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.002113 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.037186 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/741560a9-42dc-4344-9875-93eb97870572-kube-api-access-pbf55" (OuterVolumeSpecName: "kube-api-access-pbf55") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "kube-api-access-pbf55". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.056926 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-scripts" (OuterVolumeSpecName: "scripts") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.088505 5010 generic.go:334] "Generic (PLEG): container finished" podID="741560a9-42dc-4344-9875-93eb97870572" containerID="eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9" exitCode=0 Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.088573 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerDied","Data":"eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9"} Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.088603 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"741560a9-42dc-4344-9875-93eb97870572","Type":"ContainerDied","Data":"c982abef584ce9a7e52dfeff89c2f633502fdfb8806ae5fffdbae1aeb6832fa9"} Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.088620 5010 scope.go:117] "RemoveContainer" containerID="f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.088778 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.090608 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.090630 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.090640 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/741560a9-42dc-4344-9875-93eb97870572-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.090651 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbf55\" (UniqueName: \"kubernetes.io/projected/741560a9-42dc-4344-9875-93eb97870572-kube-api-access-pbf55\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.098552 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.100831 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" exitCode=0 Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.100918 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5"} Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.101829 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.101925 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.102762 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.124457 5010 scope.go:117] "RemoveContainer" containerID="4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.152474 5010 scope.go:117] "RemoveContainer" containerID="eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.187642 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.188195 5010 scope.go:117] "RemoveContainer" containerID="573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.196563 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.196603 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.196613 5010 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.204984 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-config-data" (OuterVolumeSpecName: "config-data") pod "741560a9-42dc-4344-9875-93eb97870572" (UID: "741560a9-42dc-4344-9875-93eb97870572"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.210965 5010 scope.go:117] "RemoveContainer" containerID="f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.211419 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f\": container with ID starting with f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f not found: ID does not exist" containerID="f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.211454 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f"} err="failed to get container status \"f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f\": rpc error: code = NotFound desc = could not find container \"f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f\": container with ID starting with f4fca27547eb9f0e00cd0536901cb541cfbd956d4aa20a94622f8c9b52e84e6f not found: ID does not exist" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.211476 5010 scope.go:117] "RemoveContainer" containerID="4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.211675 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21\": container with ID starting with 4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21 not found: ID does not exist" containerID="4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.211697 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21"} err="failed to get container status \"4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21\": rpc error: code = NotFound desc = could not find container \"4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21\": container with ID starting with 4fcbe94417a0a5bf246514cb2b1ba3e960a7d5e888040d1003d2b1b801368e21 not found: ID does not exist" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.211757 5010 scope.go:117] "RemoveContainer" containerID="eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.212247 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9\": container with ID starting with eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9 not found: ID does not exist" containerID="eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.212278 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9"} err="failed to get container status \"eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9\": rpc error: code = NotFound desc = could not 
find container \"eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9\": container with ID starting with eba8111c4cf736fd972f8169da4657c34dbf973698f42792d577544773c353f9 not found: ID does not exist" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.212300 5010 scope.go:117] "RemoveContainer" containerID="573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.212616 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc\": container with ID starting with 573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc not found: ID does not exist" containerID="573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.212670 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc"} err="failed to get container status \"573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc\": rpc error: code = NotFound desc = could not find container \"573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc\": container with ID starting with 573542f3d51fbdc46029dddf27e48763af0db114ca6a5063603ee46cfa31c1fc not found: ID does not exist" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.212721 5010 scope.go:117] "RemoveContainer" containerID="74af0b7ad1bdddc342c1daa4543b045a23faf8e3bd5f2a3ae5f6ba14cafd4e61" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.300173 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/741560a9-42dc-4344-9875-93eb97870572-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.446648 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.472632 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.488038 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.488677 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-central-agent" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.488727 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-central-agent" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.488757 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="proxy-httpd" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.488769 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="proxy-httpd" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.488780 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-notification-agent" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.488788 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="741560a9-42dc-4344-9875-93eb97870572" 
containerName="ceilometer-notification-agent" Nov 26 15:51:42 crc kubenswrapper[5010]: E1126 15:51:42.488813 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="sg-core" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.488825 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="sg-core" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.489075 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-central-agent" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.489108 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="sg-core" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.489130 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="ceilometer-notification-agent" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.489142 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="741560a9-42dc-4344-9875-93eb97870572" containerName="proxy-httpd" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.491582 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.495804 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.496064 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.496189 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.515609 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.607612 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.607698 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnl48\" (UniqueName: \"kubernetes.io/projected/c1c3c42e-0126-41e6-9536-d5096eb44680-kube-api-access-fnl48\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.607747 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-run-httpd\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.607788 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-config-data\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " 
pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.607816 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-log-httpd\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.607951 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.608025 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-scripts\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.608130 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.709832 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.709906 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-scripts\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.709948 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.709980 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.710026 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnl48\" (UniqueName: \"kubernetes.io/projected/c1c3c42e-0126-41e6-9536-d5096eb44680-kube-api-access-fnl48\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.710051 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-run-httpd\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.710087 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-config-data\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.710115 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-log-httpd\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.710765 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-log-httpd\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.711025 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-run-httpd\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.715624 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.718345 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-config-data\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.719503 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-scripts\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.720408 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.720897 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.731658 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnl48\" (UniqueName: \"kubernetes.io/projected/c1c3c42e-0126-41e6-9536-d5096eb44680-kube-api-access-fnl48\") pod 
\"ceilometer-0\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " pod="openstack/ceilometer-0" Nov 26 15:51:42 crc kubenswrapper[5010]: I1126 15:51:42.823062 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.295118 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:51:43 crc kubenswrapper[5010]: W1126 15:51:43.308248 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1c3c42e_0126_41e6_9536_d5096eb44680.slice/crio-5f6d223dc8f9dbd9d35de346e7846dd03031ead382790502d2a173c3161b284e WatchSource:0}: Error finding container 5f6d223dc8f9dbd9d35de346e7846dd03031ead382790502d2a173c3161b284e: Status 404 returned error can't find the container with id 5f6d223dc8f9dbd9d35de346e7846dd03031ead382790502d2a173c3161b284e Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.710582 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.832999 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-config-data\") pod \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.833427 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-combined-ca-bundle\") pod \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.833496 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c5dfa3f-21c4-461f-8f64-d3c7541859da-logs\") pod \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.833617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfswk\" (UniqueName: \"kubernetes.io/projected/2c5dfa3f-21c4-461f-8f64-d3c7541859da-kube-api-access-sfswk\") pod \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\" (UID: \"2c5dfa3f-21c4-461f-8f64-d3c7541859da\") " Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.836051 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c5dfa3f-21c4-461f-8f64-d3c7541859da-logs" (OuterVolumeSpecName: "logs") pod "2c5dfa3f-21c4-461f-8f64-d3c7541859da" (UID: "2c5dfa3f-21c4-461f-8f64-d3c7541859da"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.853291 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c5dfa3f-21c4-461f-8f64-d3c7541859da-kube-api-access-sfswk" (OuterVolumeSpecName: "kube-api-access-sfswk") pod "2c5dfa3f-21c4-461f-8f64-d3c7541859da" (UID: "2c5dfa3f-21c4-461f-8f64-d3c7541859da"). InnerVolumeSpecName "kube-api-access-sfswk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.871801 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-config-data" (OuterVolumeSpecName: "config-data") pod "2c5dfa3f-21c4-461f-8f64-d3c7541859da" (UID: "2c5dfa3f-21c4-461f-8f64-d3c7541859da"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.895343 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c5dfa3f-21c4-461f-8f64-d3c7541859da" (UID: "2c5dfa3f-21c4-461f-8f64-d3c7541859da"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.906534 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="741560a9-42dc-4344-9875-93eb97870572" path="/var/lib/kubelet/pods/741560a9-42dc-4344-9875-93eb97870572/volumes" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.936838 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfswk\" (UniqueName: \"kubernetes.io/projected/2c5dfa3f-21c4-461f-8f64-d3c7541859da-kube-api-access-sfswk\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.936861 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.936872 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c5dfa3f-21c4-461f-8f64-d3c7541859da-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:43 crc kubenswrapper[5010]: I1126 15:51:43.936882 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c5dfa3f-21c4-461f-8f64-d3c7541859da-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.129087 5010 generic.go:334] "Generic (PLEG): container finished" podID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerID="7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48" exitCode=0 Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.129174 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.129195 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c5dfa3f-21c4-461f-8f64-d3c7541859da","Type":"ContainerDied","Data":"7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48"} Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.130353 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"2c5dfa3f-21c4-461f-8f64-d3c7541859da","Type":"ContainerDied","Data":"fcae1108e6e29225435f24a065ca3eefae6aa09d7bc2b4b315862a95d86f3913"} Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.130374 5010 scope.go:117] "RemoveContainer" containerID="7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.134766 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerStarted","Data":"6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b"} Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.134814 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerStarted","Data":"5f6d223dc8f9dbd9d35de346e7846dd03031ead382790502d2a173c3161b284e"} Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.181177 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.194208 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.196799 5010 scope.go:117] "RemoveContainer" containerID="97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.208735 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:44 crc kubenswrapper[5010]: E1126 15:51:44.210966 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-log" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.211027 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-log" Nov 26 15:51:44 crc kubenswrapper[5010]: E1126 15:51:44.211066 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-api" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.211074 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-api" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.211500 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-log" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.211547 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" containerName="nova-api-api" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.220112 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.225140 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.225333 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.225977 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.234459 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.245287 5010 scope.go:117] "RemoveContainer" containerID="7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48" Nov 26 15:51:44 crc kubenswrapper[5010]: E1126 15:51:44.246221 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48\": container with ID starting with 7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48 not found: ID does not exist" containerID="7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.246264 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48"} err="failed to get container status \"7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48\": rpc error: code = NotFound desc = could not find container \"7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48\": container with ID starting with 7f617fc0857412ee8072418af4510a34ead9838b2221761191d30dc2b3b6cf48 not found: ID does not exist" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.246297 5010 scope.go:117] "RemoveContainer" containerID="97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0" Nov 26 15:51:44 crc kubenswrapper[5010]: E1126 15:51:44.246776 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0\": container with ID starting with 97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0 not found: ID does not exist" containerID="97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.246816 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0"} err="failed to get container status \"97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0\": rpc error: code = NotFound desc = could not find container \"97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0\": container with ID starting with 97b5d489be2825beeda98e5cb32f1a637a50b9907e0c5fcb812d8059fc63a1f0 not found: ID does not exist" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.345995 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-public-tls-certs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 
15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.346074 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.346266 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-config-data\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.346348 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ab857dd-9828-42a6-8c0f-3ee42a331591-logs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.346501 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.346741 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxkg4\" (UniqueName: \"kubernetes.io/projected/7ab857dd-9828-42a6-8c0f-3ee42a331591-kube-api-access-lxkg4\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.449036 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxkg4\" (UniqueName: \"kubernetes.io/projected/7ab857dd-9828-42a6-8c0f-3ee42a331591-kube-api-access-lxkg4\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.449140 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-public-tls-certs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.449174 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.449213 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-config-data\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.449237 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ab857dd-9828-42a6-8c0f-3ee42a331591-logs\") pod \"nova-api-0\" (UID: 
\"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.449263 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.458769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.460110 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ab857dd-9828-42a6-8c0f-3ee42a331591-logs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.462409 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.463257 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-public-tls-certs\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.468475 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-config-data\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.484191 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxkg4\" (UniqueName: \"kubernetes.io/projected/7ab857dd-9828-42a6-8c0f-3ee42a331591-kube-api-access-lxkg4\") pod \"nova-api-0\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " pod="openstack/nova-api-0" Nov 26 15:51:44 crc kubenswrapper[5010]: I1126 15:51:44.545201 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:51:45 crc kubenswrapper[5010]: W1126 15:51:45.061291 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ab857dd_9828_42a6_8c0f_3ee42a331591.slice/crio-e178b05e8f498bf65f17567ed6d3008f8227efec1c6e38cb14a19827f9bdf4c7 WatchSource:0}: Error finding container e178b05e8f498bf65f17567ed6d3008f8227efec1c6e38cb14a19827f9bdf4c7: Status 404 returned error can't find the container with id e178b05e8f498bf65f17567ed6d3008f8227efec1c6e38cb14a19827f9bdf4c7 Nov 26 15:51:45 crc kubenswrapper[5010]: I1126 15:51:45.071856 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:45 crc kubenswrapper[5010]: I1126 15:51:45.147047 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7ab857dd-9828-42a6-8c0f-3ee42a331591","Type":"ContainerStarted","Data":"e178b05e8f498bf65f17567ed6d3008f8227efec1c6e38cb14a19827f9bdf4c7"} Nov 26 15:51:45 crc kubenswrapper[5010]: I1126 15:51:45.149896 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerStarted","Data":"573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451"} Nov 26 15:51:45 crc kubenswrapper[5010]: I1126 15:51:45.903437 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c5dfa3f-21c4-461f-8f64-d3c7541859da" path="/var/lib/kubelet/pods/2c5dfa3f-21c4-461f-8f64-d3c7541859da/volumes" Nov 26 15:51:46 crc kubenswrapper[5010]: I1126 15:51:46.173033 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7ab857dd-9828-42a6-8c0f-3ee42a331591","Type":"ContainerStarted","Data":"05401c2e702f20bf4d9c209bd1d4c5a9158a80c2d8c5a71ad5557f52cc97b2c4"} Nov 26 15:51:46 crc kubenswrapper[5010]: I1126 15:51:46.173425 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7ab857dd-9828-42a6-8c0f-3ee42a331591","Type":"ContainerStarted","Data":"fa4d420666951588b5d65b90f3542ebb5fcfcef570d808561b232f9f33339428"} Nov 26 15:51:46 crc kubenswrapper[5010]: I1126 15:51:46.178087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerStarted","Data":"f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c"} Nov 26 15:51:46 crc kubenswrapper[5010]: I1126 15:51:46.217176 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.217153829 podStartE2EDuration="2.217153829s" podCreationTimestamp="2025-11-26 15:51:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:46.207812335 +0000 UTC m=+1526.998529513" watchObservedRunningTime="2025-11-26 15:51:46.217153829 +0000 UTC m=+1527.007870977" Nov 26 15:51:46 crc kubenswrapper[5010]: I1126 15:51:46.697729 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:46 crc kubenswrapper[5010]: I1126 15:51:46.724140 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.215646 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.371481 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-gpxv5"] Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.373162 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.376891 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.377356 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.390679 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gpxv5"] Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.530612 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-config-data\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.531003 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mp9s\" (UniqueName: \"kubernetes.io/projected/1bc7a115-86d7-4b71-8cae-92ce9ca14167-kube-api-access-9mp9s\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.531054 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.531246 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-scripts\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.633623 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mp9s\" (UniqueName: \"kubernetes.io/projected/1bc7a115-86d7-4b71-8cae-92ce9ca14167-kube-api-access-9mp9s\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.633686 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.633781 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-scripts\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.633858 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-config-data\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.640877 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.641564 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-config-data\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.642508 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-scripts\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.648854 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.657123 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mp9s\" (UniqueName: \"kubernetes.io/projected/1bc7a115-86d7-4b71-8cae-92ce9ca14167-kube-api-access-9mp9s\") pod \"nova-cell1-cell-mapping-gpxv5\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.700738 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.732495 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dc9ff69c7-klxtz"] Nov 26 15:51:47 crc kubenswrapper[5010]: I1126 15:51:47.732789 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" podUID="25f06307-6ec3-453f-b620-f76285347939" containerName="dnsmasq-dns" containerID="cri-o://5827b19dbc732d7bd831f5dd9073ae85ba02426144fcab2670e0cb07b87a87fb" gracePeriod=10 Nov 26 15:51:47 crc kubenswrapper[5010]: E1126 15:51:47.975609 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25f06307_6ec3_453f_b620_f76285347939.slice/crio-5827b19dbc732d7bd831f5dd9073ae85ba02426144fcab2670e0cb07b87a87fb.scope\": RecentStats: unable to find data in memory cache]" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.207061 5010 generic.go:334] "Generic (PLEG): container finished" podID="25f06307-6ec3-453f-b620-f76285347939" containerID="5827b19dbc732d7bd831f5dd9073ae85ba02426144fcab2670e0cb07b87a87fb" exitCode=0 Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.208122 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" event={"ID":"25f06307-6ec3-453f-b620-f76285347939","Type":"ContainerDied","Data":"5827b19dbc732d7bd831f5dd9073ae85ba02426144fcab2670e0cb07b87a87fb"} Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.298924 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.348342 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-gpxv5"] Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.461137 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-nb\") pod \"25f06307-6ec3-453f-b620-f76285347939\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.461199 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-svc\") pod \"25f06307-6ec3-453f-b620-f76285347939\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.461295 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-swift-storage-0\") pod \"25f06307-6ec3-453f-b620-f76285347939\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.461335 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-sb\") pod \"25f06307-6ec3-453f-b620-f76285347939\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.461472 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-config\") pod \"25f06307-6ec3-453f-b620-f76285347939\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.461506 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ftb9\" (UniqueName: \"kubernetes.io/projected/25f06307-6ec3-453f-b620-f76285347939-kube-api-access-7ftb9\") pod \"25f06307-6ec3-453f-b620-f76285347939\" (UID: \"25f06307-6ec3-453f-b620-f76285347939\") " Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.465863 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25f06307-6ec3-453f-b620-f76285347939-kube-api-access-7ftb9" (OuterVolumeSpecName: "kube-api-access-7ftb9") pod "25f06307-6ec3-453f-b620-f76285347939" (UID: "25f06307-6ec3-453f-b620-f76285347939"). InnerVolumeSpecName "kube-api-access-7ftb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.520282 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-config" (OuterVolumeSpecName: "config") pod "25f06307-6ec3-453f-b620-f76285347939" (UID: "25f06307-6ec3-453f-b620-f76285347939"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.521804 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "25f06307-6ec3-453f-b620-f76285347939" (UID: "25f06307-6ec3-453f-b620-f76285347939"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.528220 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "25f06307-6ec3-453f-b620-f76285347939" (UID: "25f06307-6ec3-453f-b620-f76285347939"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.541057 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "25f06307-6ec3-453f-b620-f76285347939" (UID: "25f06307-6ec3-453f-b620-f76285347939"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.541333 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "25f06307-6ec3-453f-b620-f76285347939" (UID: "25f06307-6ec3-453f-b620-f76285347939"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.563752 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.563783 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.563795 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.563805 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.563815 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f06307-6ec3-453f-b620-f76285347939-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:48 crc kubenswrapper[5010]: I1126 15:51:48.563823 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ftb9\" (UniqueName: \"kubernetes.io/projected/25f06307-6ec3-453f-b620-f76285347939-kube-api-access-7ftb9\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.219648 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" event={"ID":"25f06307-6ec3-453f-b620-f76285347939","Type":"ContainerDied","Data":"90fea5c929646bff8c1c3f1273ecfda35170201231f60ad14df8c90622a318dc"} Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.219777 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5dc9ff69c7-klxtz" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.219968 5010 scope.go:117] "RemoveContainer" containerID="5827b19dbc732d7bd831f5dd9073ae85ba02426144fcab2670e0cb07b87a87fb" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.221558 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gpxv5" event={"ID":"1bc7a115-86d7-4b71-8cae-92ce9ca14167","Type":"ContainerStarted","Data":"bcb613e02e21fe00cab73d805301517aca7702080d4b0be085e5d732ae8551ce"} Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.221577 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gpxv5" event={"ID":"1bc7a115-86d7-4b71-8cae-92ce9ca14167","Type":"ContainerStarted","Data":"9ffe36508bef92662ba59e3d60a15da7a94c37bcbe11d06de6cf81b21a30b0e0"} Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.224741 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerStarted","Data":"4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a"} Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.225387 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.247140 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-gpxv5" podStartSLOduration=2.247123593 podStartE2EDuration="2.247123593s" podCreationTimestamp="2025-11-26 15:51:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:51:49.24380933 +0000 UTC m=+1530.034526488" watchObservedRunningTime="2025-11-26 15:51:49.247123593 +0000 UTC m=+1530.037840741" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.259135 5010 scope.go:117] "RemoveContainer" containerID="9b30183b18b44114598b0c581ef6557abe5c293dc0a8c6e99f2054f8cd4eaa66" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.285642 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.5376038359999997 podStartE2EDuration="7.285587405s" podCreationTimestamp="2025-11-26 15:51:42 +0000 UTC" firstStartedPulling="2025-11-26 15:51:43.311602027 +0000 UTC m=+1524.102319215" lastFinishedPulling="2025-11-26 15:51:48.059585646 +0000 UTC m=+1528.850302784" observedRunningTime="2025-11-26 15:51:49.264012015 +0000 UTC m=+1530.054729163" watchObservedRunningTime="2025-11-26 15:51:49.285587405 +0000 UTC m=+1530.076304553" Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.303156 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5dc9ff69c7-klxtz"] Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.315881 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5dc9ff69c7-klxtz"] Nov 26 15:51:49 crc kubenswrapper[5010]: I1126 15:51:49.913302 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25f06307-6ec3-453f-b620-f76285347939" path="/var/lib/kubelet/pods/25f06307-6ec3-453f-b620-f76285347939/volumes" Nov 26 15:51:52 crc kubenswrapper[5010]: I1126 15:51:52.892853 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:51:52 crc kubenswrapper[5010]: E1126 15:51:52.893642 5010 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:51:54 crc kubenswrapper[5010]: I1126 15:51:54.294824 5010 generic.go:334] "Generic (PLEG): container finished" podID="1bc7a115-86d7-4b71-8cae-92ce9ca14167" containerID="bcb613e02e21fe00cab73d805301517aca7702080d4b0be085e5d732ae8551ce" exitCode=0 Nov 26 15:51:54 crc kubenswrapper[5010]: I1126 15:51:54.294916 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gpxv5" event={"ID":"1bc7a115-86d7-4b71-8cae-92ce9ca14167","Type":"ContainerDied","Data":"bcb613e02e21fe00cab73d805301517aca7702080d4b0be085e5d732ae8551ce"} Nov 26 15:51:54 crc kubenswrapper[5010]: I1126 15:51:54.546380 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:51:54 crc kubenswrapper[5010]: I1126 15:51:54.546839 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.563972 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.205:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.563998 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.205:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.805549 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.967598 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-config-data\") pod \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.968112 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-scripts\") pod \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.968322 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9mp9s\" (UniqueName: \"kubernetes.io/projected/1bc7a115-86d7-4b71-8cae-92ce9ca14167-kube-api-access-9mp9s\") pod \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.968390 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-combined-ca-bundle\") pod \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\" (UID: \"1bc7a115-86d7-4b71-8cae-92ce9ca14167\") " Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.974790 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bc7a115-86d7-4b71-8cae-92ce9ca14167-kube-api-access-9mp9s" (OuterVolumeSpecName: "kube-api-access-9mp9s") pod "1bc7a115-86d7-4b71-8cae-92ce9ca14167" (UID: "1bc7a115-86d7-4b71-8cae-92ce9ca14167"). InnerVolumeSpecName "kube-api-access-9mp9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:51:55 crc kubenswrapper[5010]: I1126 15:51:55.975682 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-scripts" (OuterVolumeSpecName: "scripts") pod "1bc7a115-86d7-4b71-8cae-92ce9ca14167" (UID: "1bc7a115-86d7-4b71-8cae-92ce9ca14167"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.004515 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-config-data" (OuterVolumeSpecName: "config-data") pod "1bc7a115-86d7-4b71-8cae-92ce9ca14167" (UID: "1bc7a115-86d7-4b71-8cae-92ce9ca14167"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.018478 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1bc7a115-86d7-4b71-8cae-92ce9ca14167" (UID: "1bc7a115-86d7-4b71-8cae-92ce9ca14167"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.073024 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9mp9s\" (UniqueName: \"kubernetes.io/projected/1bc7a115-86d7-4b71-8cae-92ce9ca14167-kube-api-access-9mp9s\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.073061 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.073075 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.073086 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1bc7a115-86d7-4b71-8cae-92ce9ca14167-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.324453 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-gpxv5" event={"ID":"1bc7a115-86d7-4b71-8cae-92ce9ca14167","Type":"ContainerDied","Data":"9ffe36508bef92662ba59e3d60a15da7a94c37bcbe11d06de6cf81b21a30b0e0"} Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.324771 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-gpxv5" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.324788 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ffe36508bef92662ba59e3d60a15da7a94c37bcbe11d06de6cf81b21a30b0e0" Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.625754 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.626901 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-log" containerID="cri-o://fa4d420666951588b5d65b90f3542ebb5fcfcef570d808561b232f9f33339428" gracePeriod=30 Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.627051 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-api" containerID="cri-o://05401c2e702f20bf4d9c209bd1d4c5a9158a80c2d8c5a71ad5557f52cc97b2c4" gracePeriod=30 Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.648547 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.649004 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="94f927cc-9f34-4ba3-b122-a9be64300828" containerName="nova-scheduler-scheduler" containerID="cri-o://9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" gracePeriod=30 Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.675953 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.676277 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" 
containerName="nova-metadata-log" containerID="cri-o://616f09e91a853fa7f48dc2772ca9ce83cdcd59f906f1c5dc972742151702135d" gracePeriod=30 Nov 26 15:51:56 crc kubenswrapper[5010]: I1126 15:51:56.676379 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-metadata" containerID="cri-o://40773634bcf0dbfe66732e09a3774b69fe16ab2fa9cf86335c7a911522a33022" gracePeriod=30 Nov 26 15:51:57 crc kubenswrapper[5010]: I1126 15:51:57.335752 5010 generic.go:334] "Generic (PLEG): container finished" podID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerID="616f09e91a853fa7f48dc2772ca9ce83cdcd59f906f1c5dc972742151702135d" exitCode=143 Nov 26 15:51:57 crc kubenswrapper[5010]: I1126 15:51:57.335874 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"107d20b7-bdc0-467f-a530-a6ed85ecf258","Type":"ContainerDied","Data":"616f09e91a853fa7f48dc2772ca9ce83cdcd59f906f1c5dc972742151702135d"} Nov 26 15:51:57 crc kubenswrapper[5010]: I1126 15:51:57.340997 5010 generic.go:334] "Generic (PLEG): container finished" podID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerID="fa4d420666951588b5d65b90f3542ebb5fcfcef570d808561b232f9f33339428" exitCode=143 Nov 26 15:51:57 crc kubenswrapper[5010]: I1126 15:51:57.341039 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7ab857dd-9828-42a6-8c0f-3ee42a331591","Type":"ContainerDied","Data":"fa4d420666951588b5d65b90f3542ebb5fcfcef570d808561b232f9f33339428"} Nov 26 15:51:58 crc kubenswrapper[5010]: E1126 15:51:58.069598 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:51:58 crc kubenswrapper[5010]: E1126 15:51:58.072199 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:51:58 crc kubenswrapper[5010]: E1126 15:51:58.074215 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:51:58 crc kubenswrapper[5010]: E1126 15:51:58.074273 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="94f927cc-9f34-4ba3-b122-a9be64300828" containerName="nova-scheduler-scheduler" Nov 26 15:51:59 crc kubenswrapper[5010]: I1126 15:51:59.809554 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:57828->10.217.0.197:8775: read: connection reset by peer" Nov 26 15:51:59 
crc kubenswrapper[5010]: I1126 15:51:59.809569 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:57830->10.217.0.197:8775: read: connection reset by peer" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.379997 5010 generic.go:334] "Generic (PLEG): container finished" podID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerID="40773634bcf0dbfe66732e09a3774b69fe16ab2fa9cf86335c7a911522a33022" exitCode=0 Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.380480 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"107d20b7-bdc0-467f-a530-a6ed85ecf258","Type":"ContainerDied","Data":"40773634bcf0dbfe66732e09a3774b69fe16ab2fa9cf86335c7a911522a33022"} Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.380522 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"107d20b7-bdc0-467f-a530-a6ed85ecf258","Type":"ContainerDied","Data":"b03bf94ac2fe082918b042b25c4cd45bab7c89db2ec7699f547a551f682e4efe"} Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.380545 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b03bf94ac2fe082918b042b25c4cd45bab7c89db2ec7699f547a551f682e4efe" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.415348 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.582147 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-nova-metadata-tls-certs\") pod \"107d20b7-bdc0-467f-a530-a6ed85ecf258\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.582271 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7t28\" (UniqueName: \"kubernetes.io/projected/107d20b7-bdc0-467f-a530-a6ed85ecf258-kube-api-access-k7t28\") pod \"107d20b7-bdc0-467f-a530-a6ed85ecf258\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.582360 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-config-data\") pod \"107d20b7-bdc0-467f-a530-a6ed85ecf258\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.582425 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-combined-ca-bundle\") pod \"107d20b7-bdc0-467f-a530-a6ed85ecf258\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.582548 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/107d20b7-bdc0-467f-a530-a6ed85ecf258-logs\") pod \"107d20b7-bdc0-467f-a530-a6ed85ecf258\" (UID: \"107d20b7-bdc0-467f-a530-a6ed85ecf258\") " Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.583220 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/107d20b7-bdc0-467f-a530-a6ed85ecf258-logs" (OuterVolumeSpecName: "logs") pod "107d20b7-bdc0-467f-a530-a6ed85ecf258" (UID: "107d20b7-bdc0-467f-a530-a6ed85ecf258"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.590795 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/107d20b7-bdc0-467f-a530-a6ed85ecf258-kube-api-access-k7t28" (OuterVolumeSpecName: "kube-api-access-k7t28") pod "107d20b7-bdc0-467f-a530-a6ed85ecf258" (UID: "107d20b7-bdc0-467f-a530-a6ed85ecf258"). InnerVolumeSpecName "kube-api-access-k7t28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.628155 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "107d20b7-bdc0-467f-a530-a6ed85ecf258" (UID: "107d20b7-bdc0-467f-a530-a6ed85ecf258"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.630235 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-config-data" (OuterVolumeSpecName: "config-data") pod "107d20b7-bdc0-467f-a530-a6ed85ecf258" (UID: "107d20b7-bdc0-467f-a530-a6ed85ecf258"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.666341 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "107d20b7-bdc0-467f-a530-a6ed85ecf258" (UID: "107d20b7-bdc0-467f-a530-a6ed85ecf258"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.684743 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/107d20b7-bdc0-467f-a530-a6ed85ecf258-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.684785 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.684802 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7t28\" (UniqueName: \"kubernetes.io/projected/107d20b7-bdc0-467f-a530-a6ed85ecf258-kube-api-access-k7t28\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.684814 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:00 crc kubenswrapper[5010]: I1126 15:52:00.684826 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/107d20b7-bdc0-467f-a530-a6ed85ecf258-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.389857 5010 generic.go:334] "Generic (PLEG): container finished" podID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerID="05401c2e702f20bf4d9c209bd1d4c5a9158a80c2d8c5a71ad5557f52cc97b2c4" exitCode=0 Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.389922 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7ab857dd-9828-42a6-8c0f-3ee42a331591","Type":"ContainerDied","Data":"05401c2e702f20bf4d9c209bd1d4c5a9158a80c2d8c5a71ad5557f52cc97b2c4"} Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.389938 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.427145 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.440955 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.467367 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:01 crc kubenswrapper[5010]: E1126 15:52:01.468069 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-log" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468088 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-log" Nov 26 15:52:01 crc kubenswrapper[5010]: E1126 15:52:01.468103 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-metadata" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468112 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-metadata" Nov 26 15:52:01 crc kubenswrapper[5010]: E1126 15:52:01.468154 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1bc7a115-86d7-4b71-8cae-92ce9ca14167" containerName="nova-manage" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468164 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1bc7a115-86d7-4b71-8cae-92ce9ca14167" containerName="nova-manage" Nov 26 15:52:01 crc kubenswrapper[5010]: E1126 15:52:01.468175 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f06307-6ec3-453f-b620-f76285347939" containerName="init" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468183 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f06307-6ec3-453f-b620-f76285347939" containerName="init" Nov 26 15:52:01 crc kubenswrapper[5010]: E1126 15:52:01.468204 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25f06307-6ec3-453f-b620-f76285347939" containerName="dnsmasq-dns" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468214 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="25f06307-6ec3-453f-b620-f76285347939" containerName="dnsmasq-dns" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468463 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-metadata" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468481 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1bc7a115-86d7-4b71-8cae-92ce9ca14167" containerName="nova-manage" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468523 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="25f06307-6ec3-453f-b620-f76285347939" containerName="dnsmasq-dns" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.468540 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" containerName="nova-metadata-log" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.481611 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.481772 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.484041 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.484302 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.605458 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq42x\" (UniqueName: \"kubernetes.io/projected/228e9671-d3dc-45dd-b200-7496327ebcda-kube-api-access-nq42x\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.605565 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-config-data\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.605737 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.605765 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/228e9671-d3dc-45dd-b200-7496327ebcda-logs\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.605863 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.641757 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.707978 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-config-data\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.708362 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.709103 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/228e9671-d3dc-45dd-b200-7496327ebcda-logs\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.709254 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.709407 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nq42x\" (UniqueName: \"kubernetes.io/projected/228e9671-d3dc-45dd-b200-7496327ebcda-kube-api-access-nq42x\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.710263 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/228e9671-d3dc-45dd-b200-7496327ebcda-logs\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.714210 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-config-data\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.715179 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.716150 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.726361 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq42x\" (UniqueName: \"kubernetes.io/projected/228e9671-d3dc-45dd-b200-7496327ebcda-kube-api-access-nq42x\") pod 
\"nova-metadata-0\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.811046 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxkg4\" (UniqueName: \"kubernetes.io/projected/7ab857dd-9828-42a6-8c0f-3ee42a331591-kube-api-access-lxkg4\") pod \"7ab857dd-9828-42a6-8c0f-3ee42a331591\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.811337 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-public-tls-certs\") pod \"7ab857dd-9828-42a6-8c0f-3ee42a331591\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.811378 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ab857dd-9828-42a6-8c0f-3ee42a331591-logs\") pod \"7ab857dd-9828-42a6-8c0f-3ee42a331591\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.811410 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-internal-tls-certs\") pod \"7ab857dd-9828-42a6-8c0f-3ee42a331591\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.811433 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-combined-ca-bundle\") pod \"7ab857dd-9828-42a6-8c0f-3ee42a331591\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.811624 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-config-data\") pod \"7ab857dd-9828-42a6-8c0f-3ee42a331591\" (UID: \"7ab857dd-9828-42a6-8c0f-3ee42a331591\") " Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.812487 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7ab857dd-9828-42a6-8c0f-3ee42a331591-logs" (OuterVolumeSpecName: "logs") pod "7ab857dd-9828-42a6-8c0f-3ee42a331591" (UID: "7ab857dd-9828-42a6-8c0f-3ee42a331591"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.812927 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.827129 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ab857dd-9828-42a6-8c0f-3ee42a331591-kube-api-access-lxkg4" (OuterVolumeSpecName: "kube-api-access-lxkg4") pod "7ab857dd-9828-42a6-8c0f-3ee42a331591" (UID: "7ab857dd-9828-42a6-8c0f-3ee42a331591"). InnerVolumeSpecName "kube-api-access-lxkg4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.847696 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-config-data" (OuterVolumeSpecName: "config-data") pod "7ab857dd-9828-42a6-8c0f-3ee42a331591" (UID: "7ab857dd-9828-42a6-8c0f-3ee42a331591"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.880618 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ab857dd-9828-42a6-8c0f-3ee42a331591" (UID: "7ab857dd-9828-42a6-8c0f-3ee42a331591"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.883050 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7ab857dd-9828-42a6-8c0f-3ee42a331591" (UID: "7ab857dd-9828-42a6-8c0f-3ee42a331591"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.907504 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="107d20b7-bdc0-467f-a530-a6ed85ecf258" path="/var/lib/kubelet/pods/107d20b7-bdc0-467f-a530-a6ed85ecf258/volumes" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.913341 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.913366 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ab857dd-9828-42a6-8c0f-3ee42a331591-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.913376 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.913385 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.913394 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxkg4\" (UniqueName: \"kubernetes.io/projected/7ab857dd-9828-42a6-8c0f-3ee42a331591-kube-api-access-lxkg4\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:01 crc kubenswrapper[5010]: I1126 15:52:01.920790 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7ab857dd-9828-42a6-8c0f-3ee42a331591" (UID: "7ab857dd-9828-42a6-8c0f-3ee42a331591"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.018314 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ab857dd-9828-42a6-8c0f-3ee42a331591-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.274484 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.387252 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.401107 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"228e9671-d3dc-45dd-b200-7496327ebcda","Type":"ContainerStarted","Data":"94b484d9d58e76a9c9df1ee1f29b1cceaf21e784e8b17d372c94070df020c504"} Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.403014 5010 generic.go:334] "Generic (PLEG): container finished" podID="94f927cc-9f34-4ba3-b122-a9be64300828" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" exitCode=0 Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.403100 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"94f927cc-9f34-4ba3-b122-a9be64300828","Type":"ContainerDied","Data":"9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769"} Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.403204 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"94f927cc-9f34-4ba3-b122-a9be64300828","Type":"ContainerDied","Data":"ee2537137cefc2526c532ae5b3a701444d28b884850a6ab7011b4edd287fc923"} Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.403238 5010 scope.go:117] "RemoveContainer" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.403463 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.407229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7ab857dd-9828-42a6-8c0f-3ee42a331591","Type":"ContainerDied","Data":"e178b05e8f498bf65f17567ed6d3008f8227efec1c6e38cb14a19827f9bdf4c7"} Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.407362 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.437200 5010 scope.go:117] "RemoveContainer" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" Nov 26 15:52:02 crc kubenswrapper[5010]: E1126 15:52:02.441654 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769\": container with ID starting with 9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769 not found: ID does not exist" containerID="9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.441780 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769"} err="failed to get container status \"9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769\": rpc error: code = NotFound desc = could not find container \"9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769\": container with ID starting with 9e46f2914a9a96d21f8bf55f780c44de355b3edbe25ab1c64849f80a5a620769 not found: ID does not exist" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.441827 5010 scope.go:117] "RemoveContainer" containerID="05401c2e702f20bf4d9c209bd1d4c5a9158a80c2d8c5a71ad5557f52cc97b2c4" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.469385 5010 scope.go:117] "RemoveContainer" containerID="fa4d420666951588b5d65b90f3542ebb5fcfcef570d808561b232f9f33339428" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.479053 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.506429 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525040 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: E1126 15:52:02.525538 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94f927cc-9f34-4ba3-b122-a9be64300828" containerName="nova-scheduler-scheduler" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525553 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="94f927cc-9f34-4ba3-b122-a9be64300828" containerName="nova-scheduler-scheduler" Nov 26 15:52:02 crc kubenswrapper[5010]: E1126 15:52:02.525612 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-log" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525619 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-log" Nov 26 15:52:02 crc kubenswrapper[5010]: E1126 15:52:02.525629 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-api" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525636 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-api" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525851 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-log" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525867 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="94f927cc-9f34-4ba3-b122-a9be64300828" containerName="nova-scheduler-scheduler" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.525877 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" containerName="nova-api-api" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.527625 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.530979 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.531073 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.531170 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.538676 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.547645 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-config-data\") pod \"94f927cc-9f34-4ba3-b122-a9be64300828\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.547799 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b2d28\" (UniqueName: \"kubernetes.io/projected/94f927cc-9f34-4ba3-b122-a9be64300828-kube-api-access-b2d28\") pod \"94f927cc-9f34-4ba3-b122-a9be64300828\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.547930 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-combined-ca-bundle\") pod \"94f927cc-9f34-4ba3-b122-a9be64300828\" (UID: \"94f927cc-9f34-4ba3-b122-a9be64300828\") " Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.554581 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94f927cc-9f34-4ba3-b122-a9be64300828-kube-api-access-b2d28" (OuterVolumeSpecName: "kube-api-access-b2d28") pod "94f927cc-9f34-4ba3-b122-a9be64300828" (UID: "94f927cc-9f34-4ba3-b122-a9be64300828"). InnerVolumeSpecName "kube-api-access-b2d28". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.583426 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "94f927cc-9f34-4ba3-b122-a9be64300828" (UID: "94f927cc-9f34-4ba3-b122-a9be64300828"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.599805 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-config-data" (OuterVolumeSpecName: "config-data") pod "94f927cc-9f34-4ba3-b122-a9be64300828" (UID: "94f927cc-9f34-4ba3-b122-a9be64300828"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.649933 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650151 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92c26092-3d97-417f-aaa7-48723d6c88be-logs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650385 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lv6pw\" (UniqueName: \"kubernetes.io/projected/92c26092-3d97-417f-aaa7-48723d6c88be-kube-api-access-lv6pw\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650663 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-internal-tls-certs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650750 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-public-tls-certs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650820 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-config-data\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650974 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.650991 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/94f927cc-9f34-4ba3-b122-a9be64300828-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.651003 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b2d28\" (UniqueName: \"kubernetes.io/projected/94f927cc-9f34-4ba3-b122-a9be64300828-kube-api-access-b2d28\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.733823 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.743606 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752000 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-internal-tls-certs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752046 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-public-tls-certs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752070 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-config-data\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752125 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752145 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92c26092-3d97-417f-aaa7-48723d6c88be-logs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752186 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lv6pw\" (UniqueName: \"kubernetes.io/projected/92c26092-3d97-417f-aaa7-48723d6c88be-kube-api-access-lv6pw\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.752954 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92c26092-3d97-417f-aaa7-48723d6c88be-logs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.756079 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.757978 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.760183 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-config-data\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.761116 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.761379 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-public-tls-certs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.761556 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.763521 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-internal-tls-certs\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.768352 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.779988 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lv6pw\" (UniqueName: \"kubernetes.io/projected/92c26092-3d97-417f-aaa7-48723d6c88be-kube-api-access-lv6pw\") pod \"nova-api-0\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.850423 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.854902 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.854994 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s5jbw\" (UniqueName: \"kubernetes.io/projected/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-kube-api-access-s5jbw\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.855353 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-config-data\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.961825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.962079 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s5jbw\" (UniqueName: \"kubernetes.io/projected/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-kube-api-access-s5jbw\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.962211 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-config-data\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.972162 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-config-data\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.981500 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:02 crc kubenswrapper[5010]: I1126 15:52:02.988978 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s5jbw\" (UniqueName: \"kubernetes.io/projected/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-kube-api-access-s5jbw\") pod \"nova-scheduler-0\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " pod="openstack/nova-scheduler-0" Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.081204 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.394658 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.435210 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"92c26092-3d97-417f-aaa7-48723d6c88be","Type":"ContainerStarted","Data":"673dbc71df9a4d6faf4dfc22583eacd90d47aa62c25e74b92b8eba49fec3ef5c"} Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.439737 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"228e9671-d3dc-45dd-b200-7496327ebcda","Type":"ContainerStarted","Data":"ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb"} Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.439781 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"228e9671-d3dc-45dd-b200-7496327ebcda","Type":"ContainerStarted","Data":"c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a"} Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.689543 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.689523736 podStartE2EDuration="2.689523736s" podCreationTimestamp="2025-11-26 15:52:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:52:03.464452056 +0000 UTC m=+1544.255169234" watchObservedRunningTime="2025-11-26 15:52:03.689523736 +0000 UTC m=+1544.480240884" Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.689839 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:03 crc kubenswrapper[5010]: W1126 15:52:03.702418 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9fc9e37_6c7d_45d8_81e2_c6a175467c12.slice/crio-f500719e4fc56901646d79e8a6b106901abfebb09904e54c377b40efd7267425 WatchSource:0}: Error finding container f500719e4fc56901646d79e8a6b106901abfebb09904e54c377b40efd7267425: Status 404 returned error can't find the container with id f500719e4fc56901646d79e8a6b106901abfebb09904e54c377b40efd7267425 Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.908040 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ab857dd-9828-42a6-8c0f-3ee42a331591" path="/var/lib/kubelet/pods/7ab857dd-9828-42a6-8c0f-3ee42a331591/volumes" Nov 26 15:52:03 crc kubenswrapper[5010]: I1126 15:52:03.909223 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94f927cc-9f34-4ba3-b122-a9be64300828" path="/var/lib/kubelet/pods/94f927cc-9f34-4ba3-b122-a9be64300828/volumes" Nov 26 15:52:04 crc kubenswrapper[5010]: I1126 15:52:04.455916 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"92c26092-3d97-417f-aaa7-48723d6c88be","Type":"ContainerStarted","Data":"8b5f9be0c133e2c0d365af8abb3b23cff3165b9fc4853de720fd9f31b4b01e06"} Nov 26 15:52:04 crc kubenswrapper[5010]: I1126 15:52:04.456008 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"92c26092-3d97-417f-aaa7-48723d6c88be","Type":"ContainerStarted","Data":"26f349f0d4a74599a92410c53237aaac653bda0a60be6f0aa87a4a0d24166ef0"} Nov 26 15:52:04 crc kubenswrapper[5010]: I1126 15:52:04.458681 5010 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a9fc9e37-6c7d-45d8-81e2-c6a175467c12","Type":"ContainerStarted","Data":"a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56"} Nov 26 15:52:04 crc kubenswrapper[5010]: I1126 15:52:04.458765 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a9fc9e37-6c7d-45d8-81e2-c6a175467c12","Type":"ContainerStarted","Data":"f500719e4fc56901646d79e8a6b106901abfebb09904e54c377b40efd7267425"} Nov 26 15:52:04 crc kubenswrapper[5010]: I1126 15:52:04.509457 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.509424306 podStartE2EDuration="2.509424306s" podCreationTimestamp="2025-11-26 15:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:52:04.492078852 +0000 UTC m=+1545.282796040" watchObservedRunningTime="2025-11-26 15:52:04.509424306 +0000 UTC m=+1545.300141494" Nov 26 15:52:04 crc kubenswrapper[5010]: I1126 15:52:04.524034 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.52400398 podStartE2EDuration="2.52400398s" podCreationTimestamp="2025-11-26 15:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:52:04.51119484 +0000 UTC m=+1545.301912008" watchObservedRunningTime="2025-11-26 15:52:04.52400398 +0000 UTC m=+1545.314721168" Nov 26 15:52:06 crc kubenswrapper[5010]: I1126 15:52:06.813006 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 15:52:06 crc kubenswrapper[5010]: I1126 15:52:06.813807 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 15:52:06 crc kubenswrapper[5010]: I1126 15:52:06.891399 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:52:06 crc kubenswrapper[5010]: E1126 15:52:06.891645 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:52:08 crc kubenswrapper[5010]: I1126 15:52:08.082802 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 15:52:11 crc kubenswrapper[5010]: I1126 15:52:11.813093 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 15:52:11 crc kubenswrapper[5010]: I1126 15:52:11.813947 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 15:52:12 crc kubenswrapper[5010]: I1126 15:52:12.830070 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 15:52:12 crc kubenswrapper[5010]: I1126 15:52:12.830201 
5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:52:12 crc kubenswrapper[5010]: I1126 15:52:12.851214 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:52:12 crc kubenswrapper[5010]: I1126 15:52:12.851266 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 15:52:12 crc kubenswrapper[5010]: I1126 15:52:12.852021 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 15:52:13 crc kubenswrapper[5010]: I1126 15:52:13.082473 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 15:52:13 crc kubenswrapper[5010]: I1126 15:52:13.113109 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 15:52:13 crc kubenswrapper[5010]: I1126 15:52:13.605190 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 15:52:13 crc kubenswrapper[5010]: I1126 15:52:13.865004 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:52:13 crc kubenswrapper[5010]: I1126 15:52:13.865009 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.208:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 15:52:20 crc kubenswrapper[5010]: I1126 15:52:20.893154 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:52:20 crc kubenswrapper[5010]: E1126 15:52:20.894138 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:52:21 crc kubenswrapper[5010]: I1126 15:52:21.820526 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 15:52:21 crc kubenswrapper[5010]: I1126 15:52:21.828019 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 15:52:21 crc kubenswrapper[5010]: I1126 15:52:21.840538 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 15:52:22 crc kubenswrapper[5010]: I1126 15:52:22.687754 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 15:52:22 crc kubenswrapper[5010]: I1126 15:52:22.866138 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/nova-api-0" Nov 26 15:52:22 crc kubenswrapper[5010]: I1126 15:52:22.867792 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 15:52:22 crc kubenswrapper[5010]: I1126 15:52:22.873266 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 15:52:22 crc kubenswrapper[5010]: I1126 15:52:22.874924 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 15:52:23 crc kubenswrapper[5010]: I1126 15:52:23.698017 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 15:52:23 crc kubenswrapper[5010]: I1126 15:52:23.707323 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 15:52:32 crc kubenswrapper[5010]: I1126 15:52:32.892128 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:52:32 crc kubenswrapper[5010]: E1126 15:52:32.893376 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:52:43 crc kubenswrapper[5010]: I1126 15:52:43.927044 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement0cf4-account-delete-xrw9x"] Nov 26 15:52:43 crc kubenswrapper[5010]: I1126 15:52:43.929175 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:43 crc kubenswrapper[5010]: I1126 15:52:43.945684 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 15:52:43 crc kubenswrapper[5010]: I1126 15:52:43.945967 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="08acaf58-5c2f-4fb4-8863-846c28f8d016" containerName="openstackclient" containerID="cri-o://e30a58057f8e14429694a2b07ec64cfe7a7ea07313dd194b06c05df065dded6f" gracePeriod=2 Nov 26 15:52:43 crc kubenswrapper[5010]: I1126 15:52:43.975772 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement0cf4-account-delete-xrw9x"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.000801 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-f7n92"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.025351 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-nbrh7"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.046750 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-vl2vn"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.046938 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-vl2vn" podUID="9d6a5d15-b08c-481b-84af-88e05824b26a" containerName="openstack-network-exporter" containerID="cri-o://d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab" gracePeriod=30 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.056778 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.076243 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.091423 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8c11462-1366-4e0f-9003-6079b25c6b04-operator-scripts\") pod \"placement0cf4-account-delete-xrw9x\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.091580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d45c\" (UniqueName: \"kubernetes.io/projected/e8c11462-1366-4e0f-9003-6079b25c6b04-kube-api-access-6d45c\") pod \"placement0cf4-account-delete-xrw9x\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.093195 5010 configmap.go:193] Couldn't get configMap openstack/ovncontroller-scripts: configmap "ovncontroller-scripts" not found Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.093248 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts podName:3261dde1-64a6-4fe7-851e-4a5754444fd0 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:44.593231804 +0000 UTC m=+1585.383948952 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts") pod "ovn-controller-nbrh7" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0") : configmap "ovncontroller-scripts" not found Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.110013 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-zbphj"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.124130 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-zbphj"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.140215 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.140448 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="ovn-northd" containerID="cri-o://3b98cba8078e790765a3a58a436a7c3b361b88b1f2e0cfb60098baee4f4cce2a" gracePeriod=30 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.140871 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="openstack-network-exporter" containerID="cri-o://2aa7f2cde724ae9be71611e2947e9786538808ad37c2bc8674777309a8ce98ab" gracePeriod=30 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.159724 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder3420-account-delete-8w2px"] Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.160227 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08acaf58-5c2f-4fb4-8863-846c28f8d016" containerName="openstackclient" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.160242 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="08acaf58-5c2f-4fb4-8863-846c28f8d016" containerName="openstackclient" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.160451 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="08acaf58-5c2f-4fb4-8863-846c28f8d016" containerName="openstackclient" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.161177 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.177130 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder3420-account-delete-8w2px"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.207511 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d45c\" (UniqueName: \"kubernetes.io/projected/e8c11462-1366-4e0f-9003-6079b25c6b04-kube-api-access-6d45c\") pod \"placement0cf4-account-delete-xrw9x\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.207775 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8c11462-1366-4e0f-9003-6079b25c6b04-operator-scripts\") pod \"placement0cf4-account-delete-xrw9x\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.209630 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8c11462-1366-4e0f-9003-6079b25c6b04-operator-scripts\") pod \"placement0cf4-account-delete-xrw9x\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.209754 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.209830 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data podName:a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:44.70981199 +0000 UTC m=+1585.500529138 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data") pod "rabbitmq-cell1-server-0" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25") : configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.268558 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d45c\" (UniqueName: \"kubernetes.io/projected/e8c11462-1366-4e0f-9003-6079b25c6b04-kube-api-access-6d45c\") pod \"placement0cf4-account-delete-xrw9x\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.278454 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.309616 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjppj\" (UniqueName: \"kubernetes.io/projected/7e02370f-1b63-47f7-8d66-ba7c94310c38-kube-api-access-fjppj\") pod \"cinder3420-account-delete-8w2px\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.309761 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e02370f-1b63-47f7-8d66-ba7c94310c38-operator-scripts\") pod \"cinder3420-account-delete-8w2px\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.310649 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican920e-account-delete-bv9zw"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.312055 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.326366 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican920e-account-delete-bv9zw"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.364961 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glancec7b0-account-delete-9tpdl"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.366325 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.414225 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-operator-scripts\") pod \"barbican920e-account-delete-bv9zw\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.414279 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjppj\" (UniqueName: \"kubernetes.io/projected/7e02370f-1b63-47f7-8d66-ba7c94310c38-kube-api-access-fjppj\") pod \"cinder3420-account-delete-8w2px\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.414341 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nm9vx\" (UniqueName: \"kubernetes.io/projected/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-kube-api-access-nm9vx\") pod \"barbican920e-account-delete-bv9zw\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.414386 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e02370f-1b63-47f7-8d66-ba7c94310c38-operator-scripts\") pod \"cinder3420-account-delete-8w2px\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.416171 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e02370f-1b63-47f7-8d66-ba7c94310c38-operator-scripts\") pod \"cinder3420-account-delete-8w2px\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.450124 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjppj\" (UniqueName: \"kubernetes.io/projected/7e02370f-1b63-47f7-8d66-ba7c94310c38-kube-api-access-fjppj\") pod \"cinder3420-account-delete-8w2px\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.463687 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glancec7b0-account-delete-9tpdl"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.519299 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e8dfd8a-0624-4f78-8c35-c6710328de9d-operator-scripts\") pod \"glancec7b0-account-delete-9tpdl\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.519377 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-operator-scripts\") pod \"barbican920e-account-delete-bv9zw\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.519427 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nm9vx\" (UniqueName: \"kubernetes.io/projected/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-kube-api-access-nm9vx\") pod \"barbican920e-account-delete-bv9zw\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.519477 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xttv8\" (UniqueName: \"kubernetes.io/projected/2e8dfd8a-0624-4f78-8c35-c6710328de9d-kube-api-access-xttv8\") pod \"glancec7b0-account-delete-9tpdl\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.520445 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-operator-scripts\") pod \"barbican920e-account-delete-bv9zw\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.533417 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-hgfkn"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.557313 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nm9vx\" (UniqueName: \"kubernetes.io/projected/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-kube-api-access-nm9vx\") pod \"barbican920e-account-delete-bv9zw\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.557406 5010 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-hgfkn"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.586055 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novaapif128-account-delete-msqb5"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.587540 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.602794 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapif128-account-delete-msqb5"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.623092 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e8dfd8a-0624-4f78-8c35-c6710328de9d-operator-scripts\") pod \"glancec7b0-account-delete-9tpdl\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.623198 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xttv8\" (UniqueName: \"kubernetes.io/projected/2e8dfd8a-0624-4f78-8c35-c6710328de9d-kube-api-access-xttv8\") pod \"glancec7b0-account-delete-9tpdl\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.623523 5010 configmap.go:193] Couldn't get configMap openstack/ovncontroller-scripts: configmap "ovncontroller-scripts" not found Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.623561 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts podName:3261dde1-64a6-4fe7-851e-4a5754444fd0 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:45.623548499 +0000 UTC m=+1586.414265647 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts") pod "ovn-controller-nbrh7" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0") : configmap "ovncontroller-scripts" not found Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.624551 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e8dfd8a-0624-4f78-8c35-c6710328de9d-operator-scripts\") pod \"glancec7b0-account-delete-9tpdl\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.646768 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/novacell0e911-account-delete-8hd9j"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.648446 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.664431 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.666072 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xttv8\" (UniqueName: \"kubernetes.io/projected/2e8dfd8a-0624-4f78-8c35-c6710328de9d-kube-api-access-xttv8\") pod \"glancec7b0-account-delete-9tpdl\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.666153 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0e911-account-delete-8hd9j"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.681289 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.684547 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.725349 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxt9m\" (UniqueName: \"kubernetes.io/projected/37e7e487-28ea-405b-a645-a85aa94e12d2-kube-api-access-dxt9m\") pod \"novaapif128-account-delete-msqb5\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.725524 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e7e487-28ea-405b-a645-a85aa94e12d2-operator-scripts\") pod \"novaapif128-account-delete-msqb5\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.725637 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.725677 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data podName:a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:45.725662683 +0000 UTC m=+1586.516379831 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data") pod "rabbitmq-cell1-server-0" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25") : configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.736326 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-546d9f9b4-87p6s"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.773883 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-546d9f9b4-87p6s" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-log" containerID="cri-o://5333a0de78b475fd78f332fa0f32083caa1395fc128350a6a203fa02b8019334" gracePeriod=30 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.777922 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-546d9f9b4-87p6s" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-api" containerID="cri-o://a86003926de01550b467b33cbf762fa3bc24eb67a06d8b70ca85b43666377672" gracePeriod=30 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.835746 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35439472-3a5f-450f-9fcc-2a739253ad5b-operator-scripts\") pod \"novacell0e911-account-delete-8hd9j\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.836168 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6x6m\" (UniqueName: \"kubernetes.io/projected/35439472-3a5f-450f-9fcc-2a739253ad5b-kube-api-access-w6x6m\") pod \"novacell0e911-account-delete-8hd9j\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.836260 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e7e487-28ea-405b-a645-a85aa94e12d2-operator-scripts\") pod \"novaapif128-account-delete-msqb5\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.836364 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxt9m\" (UniqueName: \"kubernetes.io/projected/37e7e487-28ea-405b-a645-a85aa94e12d2-kube-api-access-dxt9m\") pod \"novaapif128-account-delete-msqb5\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.853318 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e7e487-28ea-405b-a645-a85aa94e12d2-operator-scripts\") pod \"novaapif128-account-delete-msqb5\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.913418 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxt9m\" (UniqueName: \"kubernetes.io/projected/37e7e487-28ea-405b-a645-a85aa94e12d2-kube-api-access-dxt9m\") pod \"novaapif128-account-delete-msqb5\" (UID: 
\"37e7e487-28ea-405b-a645-a85aa94e12d2\") " pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.936871 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-755bdc5489-xpxxn"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.937356 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerName="dnsmasq-dns" containerID="cri-o://acebe2f9c033b233d36e411c21de634121ada0b8473fd12b8911fcb60f8a4bba" gracePeriod=10 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.944358 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35439472-3a5f-450f-9fcc-2a739253ad5b-operator-scripts\") pod \"novacell0e911-account-delete-8hd9j\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.944477 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6x6m\" (UniqueName: \"kubernetes.io/projected/35439472-3a5f-450f-9fcc-2a739253ad5b-kube-api-access-w6x6m\") pod \"novacell0e911-account-delete-8hd9j\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.946272 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35439472-3a5f-450f-9fcc-2a739253ad5b-operator-scripts\") pod \"novacell0e911-account-delete-8hd9j\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.963479 5010 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-nbrh7" message=< Nov 26 15:52:44 crc kubenswrapper[5010]: Exiting ovn-controller (1) [ OK ] Nov 26 15:52:44 crc kubenswrapper[5010]: > Nov 26 15:52:44 crc kubenswrapper[5010]: E1126 15:52:44.963533 5010 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" containerID="cri-o://e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.963572 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" containerID="cri-o://e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" gracePeriod=30 Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.964021 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:52:44 crc kubenswrapper[5010]: I1126 15:52:44.974465 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-x7zvb"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.001035 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-metrics-vl2vn_9d6a5d15-b08c-481b-84af-88e05824b26a/openstack-network-exporter/0.log" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.001097 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.016438 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6x6m\" (UniqueName: \"kubernetes.io/projected/35439472-3a5f-450f-9fcc-2a739253ad5b-kube-api-access-w6x6m\") pod \"novacell0e911-account-delete-8hd9j\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.018582 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.026592 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-x7zvb"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.029449 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.048142 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.048579 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data podName:9940cbe6-c323-4320-9e45-463e5c023156 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:45.54856032 +0000 UTC m=+1586.339277468 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data") pod "rabbitmq-server-0" (UID: "9940cbe6-c323-4320-9e45-463e5c023156") : configmap "rabbitmq-config-data" not found Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.052541 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-p4jwm"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072233 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-vl2vn_9d6a5d15-b08c-481b-84af-88e05824b26a/openstack-network-exporter/0.log" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072282 5010 generic.go:334] "Generic (PLEG): container finished" podID="9d6a5d15-b08c-481b-84af-88e05824b26a" containerID="d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab" exitCode=2 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072357 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-vl2vn" event={"ID":"9d6a5d15-b08c-481b-84af-88e05824b26a","Type":"ContainerDied","Data":"d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab"} Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072385 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-vl2vn" event={"ID":"9d6a5d15-b08c-481b-84af-88e05824b26a","Type":"ContainerDied","Data":"99e88f857ab4b78968b57ef15d5105a2ce09eb772a71e5a2943666e4d92d38e0"} Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072398 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-p4jwm"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072424 5010 scope.go:117] "RemoveContainer" containerID="d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.072557 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-vl2vn" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.096950 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c9fea0e4-4e18-4d7e-9af0-fd46b742565c/ovn-northd/0.log" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.096988 5010 generic.go:334] "Generic (PLEG): container finished" podID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerID="2aa7f2cde724ae9be71611e2947e9786538808ad37c2bc8674777309a8ce98ab" exitCode=2 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.097005 5010 generic.go:334] "Generic (PLEG): container finished" podID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerID="3b98cba8078e790765a3a58a436a7c3b361b88b1f2e0cfb60098baee4f4cce2a" exitCode=143 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.097024 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9fea0e4-4e18-4d7e-9af0-fd46b742565c","Type":"ContainerDied","Data":"2aa7f2cde724ae9be71611e2947e9786538808ad37c2bc8674777309a8ce98ab"} Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.097048 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9fea0e4-4e18-4d7e-9af0-fd46b742565c","Type":"ContainerDied","Data":"3b98cba8078e790765a3a58a436a7c3b361b88b1f2e0cfb60098baee4f4cce2a"} Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.119870 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron4616-account-delete-ktdvj"] Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.120408 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d6a5d15-b08c-481b-84af-88e05824b26a" containerName="openstack-network-exporter" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.120421 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d6a5d15-b08c-481b-84af-88e05824b26a" containerName="openstack-network-exporter" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.120636 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d6a5d15-b08c-481b-84af-88e05824b26a" containerName="openstack-network-exporter" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.123666 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.149739 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6a5d15-b08c-481b-84af-88e05824b26a-config\") pod \"9d6a5d15-b08c-481b-84af-88e05824b26a\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.149788 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-combined-ca-bundle\") pod \"9d6a5d15-b08c-481b-84af-88e05824b26a\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.149842 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hglv6\" (UniqueName: \"kubernetes.io/projected/9d6a5d15-b08c-481b-84af-88e05824b26a-kube-api-access-hglv6\") pod \"9d6a5d15-b08c-481b-84af-88e05824b26a\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.149901 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovs-rundir\") pod \"9d6a5d15-b08c-481b-84af-88e05824b26a\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.149945 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-metrics-certs-tls-certs\") pod \"9d6a5d15-b08c-481b-84af-88e05824b26a\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.149998 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovn-rundir\") pod \"9d6a5d15-b08c-481b-84af-88e05824b26a\" (UID: \"9d6a5d15-b08c-481b-84af-88e05824b26a\") " Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.150524 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "9d6a5d15-b08c-481b-84af-88e05824b26a" (UID: "9d6a5d15-b08c-481b-84af-88e05824b26a"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.152184 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d6a5d15-b08c-481b-84af-88e05824b26a-config" (OuterVolumeSpecName: "config") pod "9d6a5d15-b08c-481b-84af-88e05824b26a" (UID: "9d6a5d15-b08c-481b-84af-88e05824b26a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.152367 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron4616-account-delete-ktdvj"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.152421 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "9d6a5d15-b08c-481b-84af-88e05824b26a" (UID: "9d6a5d15-b08c-481b-84af-88e05824b26a"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.159931 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.160256 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="openstack-network-exporter" containerID="cri-o://969966e67ae90d742a77f84466bf294b5b02f4399d3b508d206d36643320950e" gracePeriod=300 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.162919 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d6a5d15-b08c-481b-84af-88e05824b26a-kube-api-access-hglv6" (OuterVolumeSpecName: "kube-api-access-hglv6") pod "9d6a5d15-b08c-481b-84af-88e05824b26a" (UID: "9d6a5d15-b08c-481b-84af-88e05824b26a"). InnerVolumeSpecName "kube-api-access-hglv6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.177780 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-gpxv5"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.196675 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-gpxv5"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.211464 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-hhsq2"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.225323 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9d6a5d15-b08c-481b-84af-88e05824b26a" (UID: "9d6a5d15-b08c-481b-84af-88e05824b26a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256353 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgq4l\" (UniqueName: \"kubernetes.io/projected/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-kube-api-access-bgq4l\") pod \"neutron4616-account-delete-ktdvj\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256402 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-operator-scripts\") pod \"neutron4616-account-delete-ktdvj\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256662 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d6a5d15-b08c-481b-84af-88e05824b26a-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256676 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256692 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hglv6\" (UniqueName: \"kubernetes.io/projected/9d6a5d15-b08c-481b-84af-88e05824b26a-kube-api-access-hglv6\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256701 5010 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovs-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.256749 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9d6a5d15-b08c-481b-84af-88e05824b26a-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.272966 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-hhsq2"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.364381 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgq4l\" (UniqueName: \"kubernetes.io/projected/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-kube-api-access-bgq4l\") pod \"neutron4616-account-delete-ktdvj\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.364457 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-operator-scripts\") pod \"neutron4616-account-delete-ktdvj\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.364985 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-hdcdl"] Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.365316 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created 
or running: checking if PID of e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02 is running failed: container process not found" containerID="e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.368857 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02 is running failed: container process not found" containerID="e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.378369 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-operator-scripts\") pod \"neutron4616-account-delete-ktdvj\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.383047 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02 is running failed: container process not found" containerID="e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" cmd=["/usr/local/bin/container-scripts/ovn_controller_readiness.sh"] Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.383128 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02 is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-nbrh7" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.413834 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgq4l\" (UniqueName: \"kubernetes.io/projected/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-kube-api-access-bgq4l\") pod \"neutron4616-account-delete-ktdvj\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.454525 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.456129 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-hdcdl"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.456614 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "9d6a5d15-b08c-481b-84af-88e05824b26a" (UID: "9d6a5d15-b08c-481b-84af-88e05824b26a"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.459510 5010 scope.go:117] "RemoveContainer" containerID="d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.466835 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6a5d15-b08c-481b-84af-88e05824b26a-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.482761 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.483399 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="openstack-network-exporter" containerID="cri-o://76be070eec53259277ed1ae9bfb0c4bc5bd14cf0b5a29ace0f621ed64c42f411" gracePeriod=300 Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.489226 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab\": container with ID starting with d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab not found: ID does not exist" containerID="d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.489272 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab"} err="failed to get container status \"d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab\": rpc error: code = NotFound desc = could not find container \"d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab\": container with ID starting with d695bf76697a5b3b9c2a35aeb0d9d4eb839d6995586c29afbd210e749dc05dab not found: ID does not exist" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.500800 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.501168 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="cinder-scheduler" containerID="cri-o://9d2743e44261566a3d1c21cfb428ead84b6cb4a37eb99da6c1bacd18a1287645" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.501862 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="probe" containerID="cri-o://481fb5fc62e4c26b123d374b9f9cf253a34458e076080fc635b7a5858df4367e" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.563868 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-h78d6"] Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.573256 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.573325 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data podName:9940cbe6-c323-4320-9e45-463e5c023156 nodeName:}" failed. 
No retries permitted until 2025-11-26 15:52:46.573310557 +0000 UTC m=+1587.364027705 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data") pod "rabbitmq-server-0" (UID: "9940cbe6-c323-4320-9e45-463e5c023156") : configmap "rabbitmq-config-data" not found Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.613788 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-h78d6"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.662785 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.663081 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api-log" containerID="cri-o://7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.663586 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api" containerID="cri-o://7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.684058 5010 configmap.go:193] Couldn't get configMap openstack/ovncontroller-scripts: configmap "ovncontroller-scripts" not found Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.684133 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts podName:3261dde1-64a6-4fe7-851e-4a5754444fd0 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:47.684113459 +0000 UTC m=+1588.474830607 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "scripts" (UniqueName: "kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts") pod "ovn-controller-nbrh7" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0") : configmap "ovncontroller-scripts" not found Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.719569 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.719932 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-log" containerID="cri-o://1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.720657 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-httpd" containerID="cri-o://9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.742827 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743245 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-server" containerID="cri-o://55e059be841df7938e11264822fec73874738f64ef6b875efb95510e6965cf1b" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743348 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="swift-recon-cron" containerID="cri-o://e08bba5d0a854ba8aa4fb7af34e20011b90f803f2ae0c820fde74890a8ed506d" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743395 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="rsync" containerID="cri-o://956193edff3817c0a6aaac66e75e2a2cbc0c70d7f96f5cf29968a35548725373" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743430 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-expirer" containerID="cri-o://2aac1aac86049fceb0d32a0aa7530aacebb03989a907a006110f6386991013b9" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743462 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-updater" containerID="cri-o://b290a5f7ec51985b250b6f158fb41d40ac9ddeab529cc0032fbce6f190f4fde3" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743498 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-auditor" containerID="cri-o://75089565aaa9cf8b99c1bbb2c38ff4c538bc9761ad1f7d65a1db0333de3c360e" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743529 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" 
podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-replicator" containerID="cri-o://1227084a08d26738373e26d1eaa54ec1c0e0c92d3d3601f6a05af2770c69551e" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743558 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-server" containerID="cri-o://ec35df7082d1bd361a74495ef68869fe5465b44b7de7cab15bbe9c7d46d0924f" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743587 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-updater" containerID="cri-o://e0678f8f20e1d205632c07cb24e8ce9e89576b47c8ef44f378b9a0a0dfb4ed62" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743622 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-auditor" containerID="cri-o://4b0402574e5cf70154b6681989bbdbd847b3e31c0811a89c6cfcc7aaf711a5f1" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743654 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-replicator" containerID="cri-o://6d794e589ecc207f0a022410f47d3aa359d8e1b3c5503eda2b2b369e69a171cf" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743687 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-server" containerID="cri-o://18b9616512ee9afb2cfc002c2a3a4b7c6722774ff0238f548f51aad7f1e695a8" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743742 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-reaper" containerID="cri-o://2315d69e082e6c260094225fe89d5d8817821a2dcf66915354208ff345c9a274" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.743777 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-auditor" containerID="cri-o://df9dfb68b38080d2f2517a40a46d8ae91eb3eca11c141ff220a21e22ce48690a" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.744386 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-replicator" containerID="cri-o://a92b03349dda704cc51977b5cdd2fcdd40871b506d74796925290a6da4ceb86e" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.788801 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.789226 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-log" containerID="cri-o://547cc5858c244164a45ddd7c0b27e3033da3950ba49796f26212abc8845b9246" gracePeriod=30 Nov 26 15:52:45 crc 
kubenswrapper[5010]: I1126 15:52:45.789402 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-httpd" containerID="cri-o://e14aef587918296a922d16942a038b94eb34c104faed82cb3cae2790e3e19fba" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.795792 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.795857 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data podName:a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:47.795840874 +0000 UTC m=+1588.586558022 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data") pod "rabbitmq-cell1-server-0" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25") : configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.853701 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.854229 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-log" containerID="cri-o://c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.855190 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-metadata" containerID="cri-o://ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.873635 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="ovsdbserver-nb" containerID="cri-o://8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb" gracePeriod=300 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.893414 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:52:45 crc kubenswrapper[5010]: E1126 15:52:45.893616 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.909859 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="ovsdbserver-sb" containerID="cri-o://51be9ebe273584a0cde1861b72fd331776c123194b60da0b480c2bacde3385dc" gracePeriod=300 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.923765 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="1bc7a115-86d7-4b71-8cae-92ce9ca14167" path="/var/lib/kubelet/pods/1bc7a115-86d7-4b71-8cae-92ce9ca14167/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.931529 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="260f1345-096d-4d94-901e-943c3d9e4135" path="/var/lib/kubelet/pods/260f1345-096d-4d94-901e-943c3d9e4135/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.934680 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f9a616d-7152-417c-a196-c16c881631c3" path="/var/lib/kubelet/pods/2f9a616d-7152-417c-a196-c16c881631c3/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.935669 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="647fcd2c-c729-4401-95f8-c38dede33299" path="/var/lib/kubelet/pods/647fcd2c-c729-4401-95f8-c38dede33299/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.936212 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="659b75fb-742f-4166-ab4b-e5015d05ccc1" path="/var/lib/kubelet/pods/659b75fb-742f-4166-ab4b-e5015d05ccc1/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.939678 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91f360e4-480f-4398-9d5c-c19e3146a160" path="/var/lib/kubelet/pods/91f360e4-480f-4398-9d5c-c19e3146a160/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.940186 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="953ac15c-533c-4abd-ae8b-e5b8108da094" path="/var/lib/kubelet/pods/953ac15c-533c-4abd-ae8b-e5b8108da094/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.940695 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea113023-3903-4ab3-b036-80328c6ba6ca" path="/var/lib/kubelet/pods/ea113023-3903-4ab3-b036-80328c6ba6ca/volumes" Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.944126 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.949997 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.950285 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-log" containerID="cri-o://26f349f0d4a74599a92410c53237aaac653bda0a60be6f0aa87a4a0d24166ef0" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.950485 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-api" containerID="cri-o://8b5f9be0c133e2c0d365af8abb3b23cff3165b9fc4853de720fd9f31b4b01e06" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.963867 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5c9c764c5c-5p8zc"] Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.964195 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5c9c764c5c-5p8zc" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-api" containerID="cri-o://7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b" gracePeriod=30 Nov 26 15:52:45 crc kubenswrapper[5010]: I1126 15:52:45.964364 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5c9c764c5c-5p8zc" 
podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-httpd" containerID="cri-o://0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.031095 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-knxrp"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.065343 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.113893 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-587c687588-ztm89"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.115211 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-587c687588-ztm89" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api-log" containerID="cri-o://9defbd037a4a2f05eca15526ffb9c48bad32cd70369ffd0dc805ef3172852686" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.115508 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-587c687588-ztm89" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api" containerID="cri-o://118a80403c8effe28594f56bbbae9975efb6bb4ecc9f75c9df702170fd76f085" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.169735 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b07e-account-create-update-wpwwt"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.199253 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="2aac1aac86049fceb0d32a0aa7530aacebb03989a907a006110f6386991013b9" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.199757 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="b290a5f7ec51985b250b6f158fb41d40ac9ddeab529cc0032fbce6f190f4fde3" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.199856 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="75089565aaa9cf8b99c1bbb2c38ff4c538bc9761ad1f7d65a1db0333de3c360e" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.199909 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="1227084a08d26738373e26d1eaa54ec1c0e0c92d3d3601f6a05af2770c69551e" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.199957 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="e0678f8f20e1d205632c07cb24e8ce9e89576b47c8ef44f378b9a0a0dfb4ed62" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200253 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="6d794e589ecc207f0a022410f47d3aa359d8e1b3c5503eda2b2b369e69a171cf" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200310 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="2315d69e082e6c260094225fe89d5d8817821a2dcf66915354208ff345c9a274" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200357 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" 
containerID="df9dfb68b38080d2f2517a40a46d8ae91eb3eca11c141ff220a21e22ce48690a" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200409 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="a92b03349dda704cc51977b5cdd2fcdd40871b506d74796925290a6da4ceb86e" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200531 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"2aac1aac86049fceb0d32a0aa7530aacebb03989a907a006110f6386991013b9"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200621 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"b290a5f7ec51985b250b6f158fb41d40ac9ddeab529cc0032fbce6f190f4fde3"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200685 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"75089565aaa9cf8b99c1bbb2c38ff4c538bc9761ad1f7d65a1db0333de3c360e"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200756 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"1227084a08d26738373e26d1eaa54ec1c0e0c92d3d3601f6a05af2770c69551e"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200815 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"e0678f8f20e1d205632c07cb24e8ce9e89576b47c8ef44f378b9a0a0dfb4ed62"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200869 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"6d794e589ecc207f0a022410f47d3aa359d8e1b3c5503eda2b2b369e69a171cf"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200933 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"2315d69e082e6c260094225fe89d5d8817821a2dcf66915354208ff345c9a274"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.200997 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"df9dfb68b38080d2f2517a40a46d8ae91eb3eca11c141ff220a21e22ce48690a"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.201054 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"a92b03349dda704cc51977b5cdd2fcdd40871b506d74796925290a6da4ceb86e"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.210467 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-knxrp"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.231985 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="rabbitmq" containerID="cri-o://d3ae680aa34c0a6c9f874b61e0efe2655d40cee16f8635aa026abbab0b4ef8b8" gracePeriod=604800 
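[editor's note, not part of the captured log] The kuberuntime_container.go:808 entries above record each container stop together with the grace period the kubelet applied: most OpenStack containers here get gracePeriod=30, the ovsdbserver pods get 300, and the rabbitmq containers get 604800 seconds (7 days), reflecting a much longer terminationGracePeriodSeconds on those pods. Once CRI-O reports the process gone, the PLEG "container finished" / "ContainerDied" events that follow confirm each container's exit code. The short Go sketch below is purely illustrative (the killEvent type and regexp are the editor's own, not kubelet code); it shows one way to pull these kill entries out of a log like this one and list which container was stopped with which grace period.

// Minimal sketch: extract "Killing container with a grace period" entries
// from a kubelet log on stdin. Assumes the field layout seen in this log.
package main

import (
        "bufio"
        "fmt"
        "os"
        "regexp"
        "strconv"
)

// killEvent holds the fields visible in each kill entry (illustrative type).
type killEvent struct {
        Pod         string
        Container   string
        ContainerID string
        GracePeriod int
}

var killRe = regexp.MustCompile(
        `pod="([^"]+)" podUID="[^"]+" containerName="([^"]+)" containerID="([^"]+)" gracePeriod=(\d+)`)

func main() {
        sc := bufio.NewScanner(os.Stdin)
        sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet lines can be very long
        for sc.Scan() {
                m := killRe.FindStringSubmatch(sc.Text())
                if m == nil {
                        continue
                }
                gp, _ := strconv.Atoi(m[4])
                ev := killEvent{Pod: m[1], Container: m[2], ContainerID: m[3], GracePeriod: gp}
                fmt.Printf("%s/%s stopped with gracePeriod=%ds (%s)\n",
                        ev.Pod, ev.Container, ev.GracePeriod, ev.ContainerID)
        }
}

Run against this file it would report, for example, openstack/swift-storage-0/object-server stopped with gracePeriod=30s and openstack/rabbitmq-cell1-server-0/rabbitmq stopped with gracePeriod=604800s, matching the entries above. The raw log resumes below unchanged.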
Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.245893 5010 generic.go:334] "Generic (PLEG): container finished" podID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerID="e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.246017 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7" event={"ID":"3261dde1-64a6-4fe7-851e-4a5754444fd0","Type":"ContainerDied","Data":"e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.246335 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c9fea0e4-4e18-4d7e-9af0-fd46b742565c/ovn-northd/0.log" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.246607 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.282307 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-b07e-account-create-update-wpwwt"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.282523 5010 generic.go:334] "Generic (PLEG): container finished" podID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerID="acebe2f9c033b233d36e411c21de634121ada0b8473fd12b8911fcb60f8a4bba" exitCode=0 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.282601 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" event={"ID":"7e581b31-6b6d-4e32-8775-3446bcf717d9","Type":"ContainerDied","Data":"acebe2f9c033b233d36e411c21de634121ada0b8473fd12b8911fcb60f8a4bba"} Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.293544 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.309187 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb is running failed: container process not found" containerID="8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.314362 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb is running failed: container process not found" containerID="8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb" cmd=["/usr/bin/pidof","ovsdb-server"] Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.314418 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="ovsdbserver-nb" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.324998 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerID="547cc5858c244164a45ddd7c0b27e3033da3950ba49796f26212abc8845b9246" exitCode=143 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.325063 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe931cd2-6e31-4e82-a617-f028019a60c4","Type":"ContainerDied","Data":"547cc5858c244164a45ddd7c0b27e3033da3950ba49796f26212abc8845b9246"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.332039 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-cd69b7494-nmz2d"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.332285 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener-log" containerID="cri-o://ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.332361 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener" containerID="cri-o://1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.343496 5010 generic.go:334] "Generic (PLEG): container finished" podID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerID="5333a0de78b475fd78f332fa0f32083caa1395fc128350a6a203fa02b8019334" exitCode=143 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.343576 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-546d9f9b4-87p6s" event={"ID":"9651251a-a0b2-4db8-bb82-b22a707bd7ab","Type":"ContainerDied","Data":"5333a0de78b475fd78f332fa0f32083caa1395fc128350a6a203fa02b8019334"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.351737 5010 generic.go:334] "Generic (PLEG): container finished" podID="08acaf58-5c2f-4fb4-8863-846c28f8d016" containerID="e30a58057f8e14429694a2b07ec64cfe7a7ea07313dd194b06c05df065dded6f" exitCode=137 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.353333 5010 generic.go:334] "Generic (PLEG): container finished" podID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerID="1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045" exitCode=143 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.353387 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37d52190-a61c-44fb-9c9c-7966bd00e2c8","Type":"ContainerDied","Data":"1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.368180 5010 generic.go:334] "Generic (PLEG): container finished" podID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerID="969966e67ae90d742a77f84466bf294b5b02f4399d3b508d206d36643320950e" exitCode=2 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.368246 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5780f988-6f45-4fdb-9a2b-f149c0499552","Type":"ContainerDied","Data":"969966e67ae90d742a77f84466bf294b5b02f4399d3b508d206d36643320950e"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.369960 5010 generic.go:334] "Generic (PLEG): container finished" podID="228e9671-d3dc-45dd-b200-7496327ebcda" 
containerID="c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a" exitCode=143 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.370003 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"228e9671-d3dc-45dd-b200-7496327ebcda","Type":"ContainerDied","Data":"c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.376830 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.377117 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="e618fcce-218b-4f09-a0ae-5cad873d9aab" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://326a0e69015f09983f8703d9758f4b2d20607b2b6caf77a2247c63a4d0a164fa" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.387115 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-rundir\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.390466 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.390818 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_776a1766-4e7d-4ea0-bd5b-18b6b352448a/ovsdbserver-sb/0.log" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.390910 5010 generic.go:334] "Generic (PLEG): container finished" podID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerID="76be070eec53259277ed1ae9bfb0c4bc5bd14cf0b5a29ace0f621ed64c42f411" exitCode=2 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.390969 5010 generic.go:334] "Generic (PLEG): container finished" podID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerID="51be9ebe273584a0cde1861b72fd331776c123194b60da0b480c2bacde3385dc" exitCode=143 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391065 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-combined-ca-bundle\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391120 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-config\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391238 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-scripts\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391312 5010 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-northd-tls-certs\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391072 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"776a1766-4e7d-4ea0-bd5b-18b6b352448a","Type":"ContainerDied","Data":"76be070eec53259277ed1ae9bfb0c4bc5bd14cf0b5a29ace0f621ed64c42f411"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391436 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bffsw\" (UniqueName: \"kubernetes.io/projected/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-kube-api-access-bffsw\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391480 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-metrics-certs-tls-certs\") pod \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\" (UID: \"c9fea0e4-4e18-4d7e-9af0-fd46b742565c\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.391528 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"776a1766-4e7d-4ea0-bd5b-18b6b352448a","Type":"ContainerDied","Data":"51be9ebe273584a0cde1861b72fd331776c123194b60da0b480c2bacde3385dc"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.392337 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-scripts" (OuterVolumeSpecName: "scripts") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.392754 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-config" (OuterVolumeSpecName: "config") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.392756 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.392817 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.393760 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7fbcbc6747-lkhxw"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.394012 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker-log" containerID="cri-o://91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.394316 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker" containerID="cri-o://256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.416078 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-kube-api-access-bffsw" (OuterVolumeSpecName: "kube-api-access-bffsw") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "kube-api-access-bffsw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.435454 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-vl2vn"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.446795 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-vl2vn"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.466253 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.469859 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" containerID="cri-o://5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" gracePeriod=28 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.474624 5010 generic.go:334] "Generic (PLEG): container finished" podID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerID="7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5" exitCode=143 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.474668 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd","Type":"ContainerDied","Data":"7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5"} Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.492559 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement0cf4-account-delete-xrw9x"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.494719 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.494739 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bffsw\" (UniqueName: \"kubernetes.io/projected/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-kube-api-access-bffsw\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.541768 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.576921 5010 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Nov 26 15:52:46 crc kubenswrapper[5010]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 15:52:46 crc kubenswrapper[5010]: + source /usr/local/bin/container-scripts/functions Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNBridge=br-int Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNRemote=tcp:localhost:6642 Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNEncapType=geneve Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNAvailabilityZones= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ EnableChassisAsGateway=true Nov 26 15:52:46 crc kubenswrapper[5010]: ++ PhysicalNetworks= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNHostName= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 15:52:46 crc kubenswrapper[5010]: ++ ovs_dir=/var/lib/openvswitch Nov 26 15:52:46 crc kubenswrapper[5010]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 15:52:46 crc kubenswrapper[5010]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 15:52:46 crc kubenswrapper[5010]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + cleanup_ovsdb_server_semaphore Nov 26 15:52:46 crc kubenswrapper[5010]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 15:52:46 crc kubenswrapper[5010]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 15:52:46 crc kubenswrapper[5010]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-f7n92" message=< Nov 26 15:52:46 crc kubenswrapper[5010]: Exiting ovsdb-server (5) [ OK ] Nov 26 15:52:46 crc kubenswrapper[5010]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 15:52:46 crc kubenswrapper[5010]: + source /usr/local/bin/container-scripts/functions Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNBridge=br-int Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNRemote=tcp:localhost:6642 Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNEncapType=geneve Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNAvailabilityZones= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ EnableChassisAsGateway=true Nov 26 15:52:46 crc kubenswrapper[5010]: ++ PhysicalNetworks= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNHostName= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 15:52:46 crc kubenswrapper[5010]: ++ ovs_dir=/var/lib/openvswitch Nov 26 15:52:46 crc kubenswrapper[5010]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 15:52:46 crc kubenswrapper[5010]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 15:52:46 crc kubenswrapper[5010]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + cleanup_ovsdb_server_semaphore Nov 26 15:52:46 crc kubenswrapper[5010]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 15:52:46 crc kubenswrapper[5010]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 15:52:46 crc kubenswrapper[5010]: > Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.576967 5010 kuberuntime_container.go:691] "PreStop hook failed" err=< Nov 26 15:52:46 crc kubenswrapper[5010]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Nov 26 15:52:46 crc kubenswrapper[5010]: + source /usr/local/bin/container-scripts/functions Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNBridge=br-int Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNRemote=tcp:localhost:6642 Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNEncapType=geneve Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNAvailabilityZones= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ EnableChassisAsGateway=true Nov 26 15:52:46 crc kubenswrapper[5010]: ++ PhysicalNetworks= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ OVNHostName= Nov 26 15:52:46 crc kubenswrapper[5010]: ++ DB_FILE=/etc/openvswitch/conf.db Nov 26 15:52:46 crc kubenswrapper[5010]: ++ ovs_dir=/var/lib/openvswitch Nov 26 15:52:46 crc kubenswrapper[5010]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Nov 26 15:52:46 crc kubenswrapper[5010]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Nov 26 15:52:46 crc kubenswrapper[5010]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + sleep 0.5 Nov 26 15:52:46 crc kubenswrapper[5010]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Nov 26 15:52:46 crc kubenswrapper[5010]: + cleanup_ovsdb_server_semaphore Nov 26 15:52:46 crc kubenswrapper[5010]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Nov 26 15:52:46 crc kubenswrapper[5010]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Nov 26 15:52:46 crc kubenswrapper[5010]: > pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" containerID="cri-o://b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.576999 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" containerID="cri-o://b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" gracePeriod=28 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.591926 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.598093 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.598372 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.602753 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 15:52:46 crc kubenswrapper[5010]: E1126 15:52:46.602845 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data podName:9940cbe6-c323-4320-9e45-463e5c023156 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:48.60282328 +0000 UTC m=+1589.393540428 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data") pod "rabbitmq-server-0" (UID: "9940cbe6-c323-4320-9e45-463e5c023156") : configmap "rabbitmq-config-data" not found Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.629621 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="rabbitmq" containerID="cri-o://e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6" gracePeriod=604800 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.642562 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-nbrh7" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.642823 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" containerName="galera" containerID="cri-o://fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.651021 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.692350 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "c9fea0e4-4e18-4d7e-9af0-fd46b742565c" (UID: "c9fea0e4-4e18-4d7e-9af0-fd46b742565c"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.698203 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-cell1-novncproxy-0" podUID="e618fcce-218b-4f09-a0ae-5cad873d9aab" containerName="nova-cell1-novncproxy-novncproxy" probeResult="failure" output="Get \"https://10.217.0.202:6080/vnc_lite.html\": dial tcp 10.217.0.202:6080: connect: connection refused" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.718346 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fea0e4-4e18-4d7e-9af0-fd46b742565c-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.745287 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder3420-account-delete-8w2px"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.788666 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glancec7b0-account-delete-9tpdl"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820428 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-config\") pod \"7e581b31-6b6d-4e32-8775-3446bcf717d9\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820519 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820599 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-nb\") pod \"7e581b31-6b6d-4e32-8775-3446bcf717d9\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820629 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820692 5010 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-ovn-controller-tls-certs\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820721 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-log-ovn\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820792 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-sb\") pod \"7e581b31-6b6d-4e32-8775-3446bcf717d9\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820816 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-swift-storage-0\") pod \"7e581b31-6b6d-4e32-8775-3446bcf717d9\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820850 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-svc\") pod \"7e581b31-6b6d-4e32-8775-3446bcf717d9\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820871 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6s675\" (UniqueName: \"kubernetes.io/projected/7e581b31-6b6d-4e32-8775-3446bcf717d9-kube-api-access-6s675\") pod \"7e581b31-6b6d-4e32-8775-3446bcf717d9\" (UID: \"7e581b31-6b6d-4e32-8775-3446bcf717d9\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820925 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run-ovn\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820950 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-combined-ca-bundle\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.820975 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82n4f\" (UniqueName: \"kubernetes.io/projected/3261dde1-64a6-4fe7-851e-4a5754444fd0-kube-api-access-82n4f\") pod \"3261dde1-64a6-4fe7-851e-4a5754444fd0\" (UID: \"3261dde1-64a6-4fe7-851e-4a5754444fd0\") " Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.821292 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "var-log-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.821705 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.822452 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run" (OuterVolumeSpecName: "var-run") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.823902 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts" (OuterVolumeSpecName: "scripts") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.872051 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e581b31-6b6d-4e32-8775-3446bcf717d9-kube-api-access-6s675" (OuterVolumeSpecName: "kube-api-access-6s675") pod "7e581b31-6b6d-4e32-8775-3446bcf717d9" (UID: "7e581b31-6b6d-4e32-8775-3446bcf717d9"). InnerVolumeSpecName "kube-api-access-6s675". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.897565 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3261dde1-64a6-4fe7-851e-4a5754444fd0-kube-api-access-82n4f" (OuterVolumeSpecName: "kube-api-access-82n4f") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "kube-api-access-82n4f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.926448 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6s675\" (UniqueName: \"kubernetes.io/projected/7e581b31-6b6d-4e32-8775-3446bcf717d9-kube-api-access-6s675\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.926490 5010 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.926500 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82n4f\" (UniqueName: \"kubernetes.io/projected/3261dde1-64a6-4fe7-851e-4a5754444fd0-kube-api-access-82n4f\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.926529 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/3261dde1-64a6-4fe7-851e-4a5754444fd0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.926539 5010 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.926548 5010 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/3261dde1-64a6-4fe7-851e-4a5754444fd0-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.955225 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.955468 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerName="nova-scheduler-scheduler" containerID="cri-o://a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" gracePeriod=30 Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.983977 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novaapif128-account-delete-msqb5"] Nov 26 15:52:46 crc kubenswrapper[5010]: I1126 15:52:46.994341 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.067781 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.068018 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="1cfc9265-de84-4047-9e01-69444aa4d9f5" containerName="nova-cell1-conductor-conductor" containerID="cri-o://f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" gracePeriod=30 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.090863 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-brgbd"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.102607 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-brgbd"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.122034 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_776a1766-4e7d-4ea0-bd5b-18b6b352448a/ovsdbserver-sb/0.log" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.122365 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.122972 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-56krg"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.143414 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config\") pod \"08acaf58-5c2f-4fb4-8863-846c28f8d016\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.152897 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-56krg"] Nov 26 15:52:47 crc kubenswrapper[5010]: W1126 15:52:47.153054 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37e7e487_28ea_405b_a645_a85aa94e12d2.slice/crio-a9408ad09b4d613260f3e8fc0bb6d6a096129bbf38072da73b83eb2e4f57c5e2 WatchSource:0}: Error finding container a9408ad09b4d613260f3e8fc0bb6d6a096129bbf38072da73b83eb2e4f57c5e2: Status 404 returned error can't find the container with id a9408ad09b4d613260f3e8fc0bb6d6a096129bbf38072da73b83eb2e4f57c5e2 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.164992 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config-secret\") pod \"08acaf58-5c2f-4fb4-8863-846c28f8d016\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167116 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdb-rundir\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167161 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2k59x\" (UniqueName: \"kubernetes.io/projected/08acaf58-5c2f-4fb4-8863-846c28f8d016-kube-api-access-2k59x\") pod 
\"08acaf58-5c2f-4fb4-8863-846c28f8d016\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167204 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pz2c\" (UniqueName: \"kubernetes.io/projected/776a1766-4e7d-4ea0-bd5b-18b6b352448a-kube-api-access-8pz2c\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167268 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-combined-ca-bundle\") pod \"08acaf58-5c2f-4fb4-8863-846c28f8d016\" (UID: \"08acaf58-5c2f-4fb4-8863-846c28f8d016\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167322 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-combined-ca-bundle\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167358 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdbserver-sb-tls-certs\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167465 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-scripts\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167489 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-metrics-certs-tls-certs\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167514 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.167544 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-config\") pod \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\" (UID: \"776a1766-4e7d-4ea0-bd5b-18b6b352448a\") " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.172822 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "ovsdb-rundir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.173641 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-scripts" (OuterVolumeSpecName: "scripts") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.182866 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-config" (OuterVolumeSpecName: "config") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.182880 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.182912 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.184424 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/776a1766-4e7d-4ea0-bd5b-18b6b352448a-kube-api-access-8pz2c" (OuterVolumeSpecName: "kube-api-access-8pz2c") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "kube-api-access-8pz2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.185486 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage10-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "local-storage10-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.187961 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "08acaf58-5c2f-4fb4-8863-846c28f8d016" (UID: "08acaf58-5c2f-4fb4-8863-846c28f8d016"). InnerVolumeSpecName "openstack-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.192081 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.192310 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerName="nova-cell0-conductor-conductor" containerID="cri-o://b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" gracePeriod=30 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.216821 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08acaf58-5c2f-4fb4-8863-846c28f8d016-kube-api-access-2k59x" (OuterVolumeSpecName: "kube-api-access-2k59x") pod "08acaf58-5c2f-4fb4-8863-846c28f8d016" (UID: "08acaf58-5c2f-4fb4-8863-846c28f8d016"). InnerVolumeSpecName "kube-api-access-2k59x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.288407 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.289955 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.289982 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" " Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.289992 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/776a1766-4e7d-4ea0-bd5b-18b6b352448a-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.290003 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.290014 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2k59x\" (UniqueName: \"kubernetes.io/projected/08acaf58-5c2f-4fb4-8863-846c28f8d016-kube-api-access-2k59x\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.290024 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pz2c\" (UniqueName: \"kubernetes.io/projected/776a1766-4e7d-4ea0-bd5b-18b6b352448a-kube-api-access-8pz2c\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.345553 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican920e-account-delete-bv9zw"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.434353 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod 
"7e581b31-6b6d-4e32-8775-3446bcf717d9" (UID: "7e581b31-6b6d-4e32-8775-3446bcf717d9"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.462965 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/novacell0e911-account-delete-8hd9j"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.479085 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage10-crc" (UniqueName: "kubernetes.io/local-volume/local-storage10-crc") on node "crc" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.495541 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.495966 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.496604 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7e581b31-6b6d-4e32-8775-3446bcf717d9" (UID: "7e581b31-6b6d-4e32-8775-3446bcf717d9"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.502287 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-config" (OuterVolumeSpecName: "config") pod "7e581b31-6b6d-4e32-8775-3446bcf717d9" (UID: "7e581b31-6b6d-4e32-8775-3446bcf717d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.507477 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron4616-account-delete-ktdvj"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.507763 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: E1126 15:52:47.508033 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.512553 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "3261dde1-64a6-4fe7-851e-4a5754444fd0" (UID: "3261dde1-64a6-4fe7-851e-4a5754444fd0"). InnerVolumeSpecName "ovn-controller-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.527642 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "08acaf58-5c2f-4fb4-8863-846c28f8d016" (UID: "08acaf58-5c2f-4fb4-8863-846c28f8d016"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: E1126 15:52:47.527964 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.530474 5010 generic.go:334] "Generic (PLEG): container finished" podID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerID="481fb5fc62e4c26b123d374b9f9cf253a34458e076080fc635b7a5858df4367e" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.530505 5010 generic.go:334] "Generic (PLEG): container finished" podID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerID="9d2743e44261566a3d1c21cfb428ead84b6cb4a37eb99da6c1bacd18a1287645" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.530573 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0180fc92-954c-4857-9caf-4b4e5ca0c214","Type":"ContainerDied","Data":"481fb5fc62e4c26b123d374b9f9cf253a34458e076080fc635b7a5858df4367e"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.530600 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0180fc92-954c-4857-9caf-4b4e5ca0c214","Type":"ContainerDied","Data":"9d2743e44261566a3d1c21cfb428ead84b6cb4a37eb99da6c1bacd18a1287645"} Nov 26 15:52:47 crc kubenswrapper[5010]: E1126 15:52:47.533048 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:47 crc kubenswrapper[5010]: E1126 15:52:47.533094 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerName="nova-cell0-conductor-conductor" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.547138 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7e581b31-6b6d-4e32-8775-3446bcf717d9" (UID: "7e581b31-6b6d-4e32-8775-3446bcf717d9"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.554318 5010 generic.go:334] "Generic (PLEG): container finished" podID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerID="91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e" exitCode=143 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.554392 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" event={"ID":"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a","Type":"ContainerDied","Data":"91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.565990 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder3420-account-delete-8w2px" event={"ID":"7e02370f-1b63-47f7-8d66-ba7c94310c38","Type":"ContainerStarted","Data":"a8a98307122d033d08e0bbee5ea734e989366262a2b5c3dc07d80c012b48e1fc"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.582629 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" event={"ID":"7e581b31-6b6d-4e32-8775-3446bcf717d9","Type":"ContainerDied","Data":"a74e79376bec79612d98c04fba6caef1dd6b711a147e6145a27e98d5f0838c89"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.582691 5010 scope.go:117] "RemoveContainer" containerID="acebe2f9c033b233d36e411c21de634121ada0b8473fd12b8911fcb60f8a4bba" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.583838 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-755bdc5489-xpxxn" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.587692 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "7e581b31-6b6d-4e32-8775-3446bcf717d9" (UID: "7e581b31-6b6d-4e32-8775-3446bcf717d9"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.589089 5010 generic.go:334] "Generic (PLEG): container finished" podID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerID="ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e" exitCode=143 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.589144 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" event={"ID":"9687c9f4-9131-4c43-a1f2-2faf3040e499","Type":"ContainerDied","Data":"ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603381 5010 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603420 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603430 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603442 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603450 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7e581b31-6b6d-4e32-8775-3446bcf717d9-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603472 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603484 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/3261dde1-64a6-4fe7-851e-4a5754444fd0-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.603948 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5780f988-6f45-4fdb-9a2b-f149c0499552/ovsdbserver-nb/0.log" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.605181 5010 generic.go:334] "Generic (PLEG): container finished" podID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerID="8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb" exitCode=143 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.605255 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"5780f988-6f45-4fdb-9a2b-f149c0499552","Type":"ContainerDied","Data":"8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.605291 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" 
event={"ID":"5780f988-6f45-4fdb-9a2b-f149c0499552","Type":"ContainerDied","Data":"26d6f1cf84cd8abc09967ed7f818439e51559f49fe4de7feea9c5f0dc231260a"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.605306 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26d6f1cf84cd8abc09967ed7f818439e51559f49fe4de7feea9c5f0dc231260a" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.608120 5010 generic.go:334] "Generic (PLEG): container finished" podID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerID="0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.608178 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9c764c5c-5p8zc" event={"ID":"5eee7686-f868-4e9e-bf61-b108eeb88bfa","Type":"ContainerDied","Data":"0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.632005 5010 generic.go:334] "Generic (PLEG): container finished" podID="e618fcce-218b-4f09-a0ae-5cad873d9aab" containerID="326a0e69015f09983f8703d9758f4b2d20607b2b6caf77a2247c63a4d0a164fa" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.632091 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e618fcce-218b-4f09-a0ae-5cad873d9aab","Type":"ContainerDied","Data":"326a0e69015f09983f8703d9758f4b2d20607b2b6caf77a2247c63a4d0a164fa"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.640980 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.646120 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "08acaf58-5c2f-4fb4-8863-846c28f8d016" (UID: "08acaf58-5c2f-4fb4-8863-846c28f8d016"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649697 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="956193edff3817c0a6aaac66e75e2a2cbc0c70d7f96f5cf29968a35548725373" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649737 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="ec35df7082d1bd361a74495ef68869fe5465b44b7de7cab15bbe9c7d46d0924f" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649748 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="4b0402574e5cf70154b6681989bbdbd847b3e31c0811a89c6cfcc7aaf711a5f1" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649756 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="18b9616512ee9afb2cfc002c2a3a4b7c6722774ff0238f548f51aad7f1e695a8" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649762 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="55e059be841df7938e11264822fec73874738f64ef6b875efb95510e6965cf1b" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649805 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"956193edff3817c0a6aaac66e75e2a2cbc0c70d7f96f5cf29968a35548725373"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649831 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"ec35df7082d1bd361a74495ef68869fe5465b44b7de7cab15bbe9c7d46d0924f"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649843 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"4b0402574e5cf70154b6681989bbdbd847b3e31c0811a89c6cfcc7aaf711a5f1"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649852 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"18b9616512ee9afb2cfc002c2a3a4b7c6722774ff0238f548f51aad7f1e695a8"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.649860 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"55e059be841df7938e11264822fec73874738f64ef6b875efb95510e6965cf1b"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.652351 5010 generic.go:334] "Generic (PLEG): container finished" podID="92c26092-3d97-417f-aaa7-48723d6c88be" containerID="26f349f0d4a74599a92410c53237aaac653bda0a60be6f0aa87a4a0d24166ef0" exitCode=143 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.652407 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"92c26092-3d97-417f-aaa7-48723d6c88be","Type":"ContainerDied","Data":"26f349f0d4a74599a92410c53237aaac653bda0a60be6f0aa87a4a0d24166ef0"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.663643 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-0_776a1766-4e7d-4ea0-bd5b-18b6b352448a/ovsdbserver-sb/0.log" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.664145 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.665071 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"776a1766-4e7d-4ea0-bd5b-18b6b352448a","Type":"ContainerDied","Data":"a1a9c340e7269e2e729f030ae3f170d597b82dca49c03d6efa6539817b46b698"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.677046 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican920e-account-delete-bv9zw" event={"ID":"0a45c0f6-649b-4b48-8245-4f70da1c3a4f","Type":"ContainerStarted","Data":"3872257d041b635dcd61201eef6e22c1fe1acc84114d7490bc52b8ea76b30947"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.685702 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif128-account-delete-msqb5" event={"ID":"37e7e487-28ea-405b-a645-a85aa94e12d2","Type":"ContainerStarted","Data":"a9408ad09b4d613260f3e8fc0bb6d6a096129bbf38072da73b83eb2e4f57c5e2"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.702056 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "776a1766-4e7d-4ea0-bd5b-18b6b352448a" (UID: "776a1766-4e7d-4ea0-bd5b-18b6b352448a"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.705796 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.705848 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/08acaf58-5c2f-4fb4-8863-846c28f8d016-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.705861 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/776a1766-4e7d-4ea0-bd5b-18b6b352448a-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.715576 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_c9fea0e4-4e18-4d7e-9af0-fd46b742565c/ovn-northd/0.log" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.715654 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"c9fea0e4-4e18-4d7e-9af0-fd46b742565c","Type":"ContainerDied","Data":"9cabee4cf956a9f8bba6dd4f186f505519eea586ef206c92ad2e1d18b25f952a"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.715757 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.724405 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-nbrh7" event={"ID":"3261dde1-64a6-4fe7-851e-4a5754444fd0","Type":"ContainerDied","Data":"2c056ceb03fc4922675379e167bfde32a50fa9cbfe7831a79c5efae5e491ab00"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.724515 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-nbrh7" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.770467 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement0cf4-account-delete-xrw9x" event={"ID":"e8c11462-1366-4e0f-9003-6079b25c6b04","Type":"ContainerStarted","Data":"dfeaf06140981180f1a1a2e0c95ed0d76c067b94bc7a96a4d1dbe2f41d09225e"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.770519 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement0cf4-account-delete-xrw9x" event={"ID":"e8c11462-1366-4e0f-9003-6079b25c6b04","Type":"ContainerStarted","Data":"9ecf757230d66a5baf0e952cf87db376c67b755faf3e81855703e55b2ff4fae4"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.786702 5010 generic.go:334] "Generic (PLEG): container finished" podID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" exitCode=0 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.786794 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerDied","Data":"b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.796444 5010 generic.go:334] "Generic (PLEG): container finished" podID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerID="9defbd037a4a2f05eca15526ffb9c48bad32cd70369ffd0dc805ef3172852686" exitCode=143 Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.796519 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-587c687588-ztm89" event={"ID":"0e65ad49-eec3-460d-aa80-0880c5e2e86b","Type":"ContainerDied","Data":"9defbd037a4a2f05eca15526ffb9c48bad32cd70369ffd0dc805ef3172852686"} Nov 26 15:52:47 crc kubenswrapper[5010]: E1126 15:52:47.811885 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:47 crc kubenswrapper[5010]: E1126 15:52:47.811968 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data podName:a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:51.811948967 +0000 UTC m=+1592.602666115 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data") pod "rabbitmq-cell1-server-0" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25") : configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.813054 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.816841 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancec7b0-account-delete-9tpdl" event={"ID":"2e8dfd8a-0624-4f78-8c35-c6710328de9d","Type":"ContainerStarted","Data":"df87b2b96caa04e818d12cf1fc451399f0f69142a4865be35d9783c67ee916a0"} Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.928698 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0756834c-f4fd-4aaa-b3c9-d00fad779b4b" path="/var/lib/kubelet/pods/0756834c-f4fd-4aaa-b3c9-d00fad779b4b/volumes" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.929636 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08acaf58-5c2f-4fb4-8863-846c28f8d016" path="/var/lib/kubelet/pods/08acaf58-5c2f-4fb4-8863-846c28f8d016/volumes" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.933154 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="731566ac-0993-4f7d-a4ad-9fadd9beee04" path="/var/lib/kubelet/pods/731566ac-0993-4f7d-a4ad-9fadd9beee04/volumes" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.934666 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7432b1a0-ae0e-4db9-8295-cc11d1d657e7" path="/var/lib/kubelet/pods/7432b1a0-ae0e-4db9-8295-cc11d1d657e7/volumes" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.938542 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d6a5d15-b08c-481b-84af-88e05824b26a" path="/var/lib/kubelet/pods/9d6a5d15-b08c-481b-84af-88e05824b26a/volumes" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.955279 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e03a12d5-1d79-4780-b7cb-e752eaec9783" path="/var/lib/kubelet/pods/e03a12d5-1d79-4780-b7cb-e752eaec9783/volumes" Nov 26 15:52:47 crc kubenswrapper[5010]: I1126 15:52:47.986890 5010 scope.go:117] "RemoveContainer" containerID="3721092d2f588508ae5f89654f3575bb94f0470deb70912296aabc4108ca40d2" Nov 26 15:52:48 crc kubenswrapper[5010]: E1126 15:52:48.088452 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:52:48 crc kubenswrapper[5010]: E1126 15:52:48.093660 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:52:48 crc kubenswrapper[5010]: E1126 15:52:48.101232 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:52:48 crc kubenswrapper[5010]: E1126 15:52:48.101299 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" 
podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerName="nova-scheduler-scheduler" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.157728 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-6d9f966b7c-7cbw2"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.157985 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-httpd" containerID="cri-o://4248baa6552f5c6e89014c848cb17cfc27c39511119002d3f8686be69996f6fe" gracePeriod=30 Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.158115 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-server" containerID="cri-o://b34d4f722a91c454494472df034a2abe16ed2231c30fd284d1678d1f21a2a6d7" gracePeriod=30 Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.173696 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_5780f988-6f45-4fdb-9a2b-f149c0499552/ovsdbserver-nb/0.log" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.174260 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.207368 5010 scope.go:117] "RemoveContainer" containerID="76be070eec53259277ed1ae9bfb0c4bc5bd14cf0b5a29ace0f621ed64c42f411" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.227204 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.227307 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdbserver-nb-tls-certs\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.228461 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdb-rundir\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.228537 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6txx\" (UniqueName: \"kubernetes.io/projected/5780f988-6f45-4fdb-9a2b-f149c0499552-kube-api-access-w6txx\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.228566 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-metrics-certs-tls-certs\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.228739 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-scripts\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.228763 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-combined-ca-bundle\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.228798 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-config\") pod \"5780f988-6f45-4fdb-9a2b-f149c0499552\" (UID: \"5780f988-6f45-4fdb-9a2b-f149c0499552\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.232545 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.232555 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-config" (OuterVolumeSpecName: "config") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.232684 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-scripts" (OuterVolumeSpecName: "scripts") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.261331 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.272020 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5780f988-6f45-4fdb-9a2b-f149c0499552-kube-api-access-w6txx" (OuterVolumeSpecName: "kube-api-access-w6txx") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "kube-api-access-w6txx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.309752 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.331382 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.331417 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6txx\" (UniqueName: \"kubernetes.io/projected/5780f988-6f45-4fdb-9a2b-f149c0499552-kube-api-access-w6txx\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.331427 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.331436 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.331445 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5780f988-6f45-4fdb-9a2b-f149c0499552-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.331468 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.360768 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.367278 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.412224 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "5780f988-6f45-4fdb-9a2b-f149c0499552" (UID: "5780f988-6f45-4fdb-9a2b-f149c0499552"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.432961 5010 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.432997 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.433006 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5780f988-6f45-4fdb-9a2b-f149c0499552-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.500366 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.520782 5010 scope.go:117] "RemoveContainer" containerID="51be9ebe273584a0cde1861b72fd331776c123194b60da0b480c2bacde3385dc" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.534767 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0180fc92-954c-4857-9caf-4b4e5ca0c214-etc-machine-id\") pod \"0180fc92-954c-4857-9caf-4b4e5ca0c214\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.534866 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data-custom\") pod \"0180fc92-954c-4857-9caf-4b4e5ca0c214\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.534882 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0180fc92-954c-4857-9caf-4b4e5ca0c214-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0180fc92-954c-4857-9caf-4b4e5ca0c214" (UID: "0180fc92-954c-4857-9caf-4b4e5ca0c214"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.534923 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-combined-ca-bundle\") pod \"0180fc92-954c-4857-9caf-4b4e5ca0c214\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.534965 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data\") pod \"0180fc92-954c-4857-9caf-4b4e5ca0c214\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.534986 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqsbd\" (UniqueName: \"kubernetes.io/projected/0180fc92-954c-4857-9caf-4b4e5ca0c214-kube-api-access-sqsbd\") pod \"0180fc92-954c-4857-9caf-4b4e5ca0c214\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.535013 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-scripts\") pod \"0180fc92-954c-4857-9caf-4b4e5ca0c214\" (UID: \"0180fc92-954c-4857-9caf-4b4e5ca0c214\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.535461 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0180fc92-954c-4857-9caf-4b4e5ca0c214-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.550487 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0180fc92-954c-4857-9caf-4b4e5ca0c214-kube-api-access-sqsbd" (OuterVolumeSpecName: "kube-api-access-sqsbd") pod "0180fc92-954c-4857-9caf-4b4e5ca0c214" (UID: "0180fc92-954c-4857-9caf-4b4e5ca0c214"). InnerVolumeSpecName "kube-api-access-sqsbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.558764 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-scripts" (OuterVolumeSpecName: "scripts") pod "0180fc92-954c-4857-9caf-4b4e5ca0c214" (UID: "0180fc92-954c-4857-9caf-4b4e5ca0c214"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.570134 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0180fc92-954c-4857-9caf-4b4e5ca0c214" (UID: "0180fc92-954c-4857-9caf-4b4e5ca0c214"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.570902 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.576405 5010 scope.go:117] "RemoveContainer" containerID="2aa7f2cde724ae9be71611e2947e9786538808ad37c2bc8674777309a8ce98ab" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.587976 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.592964 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.602881 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.608923 5010 scope.go:117] "RemoveContainer" containerID="3b98cba8078e790765a3a58a436a7c3b361b88b1f2e0cfb60098baee4f4cce2a" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.628228 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-755bdc5489-xpxxn"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637586 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dpp8m\" (UniqueName: \"kubernetes.io/projected/1afd71d7-914c-4e41-b04f-0325049fa972-kube-api-access-dpp8m\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637628 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-generated\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637674 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-galera-tls-certs\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637727 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-combined-ca-bundle\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637756 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-combined-ca-bundle\") pod \"e618fcce-218b-4f09-a0ae-5cad873d9aab\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637786 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-kolla-config\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637848 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-vencrypt-tls-certs\") pod \"e618fcce-218b-4f09-a0ae-5cad873d9aab\" (UID: 
\"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637875 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-config-data\") pod \"e618fcce-218b-4f09-a0ae-5cad873d9aab\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-operator-scripts\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.637988 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6cdk\" (UniqueName: \"kubernetes.io/projected/e618fcce-218b-4f09-a0ae-5cad873d9aab-kube-api-access-m6cdk\") pod \"e618fcce-218b-4f09-a0ae-5cad873d9aab\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638006 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-nova-novncproxy-tls-certs\") pod \"e618fcce-218b-4f09-a0ae-5cad873d9aab\" (UID: \"e618fcce-218b-4f09-a0ae-5cad873d9aab\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638032 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-default\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638058 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"1afd71d7-914c-4e41-b04f-0325049fa972\" (UID: \"1afd71d7-914c-4e41-b04f-0325049fa972\") " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638440 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638453 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqsbd\" (UniqueName: \"kubernetes.io/projected/0180fc92-954c-4857-9caf-4b4e5ca0c214-kube-api-access-sqsbd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638464 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: E1126 15:52:48.638516 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 15:52:48 crc kubenswrapper[5010]: E1126 15:52:48.638560 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data podName:9940cbe6-c323-4320-9e45-463e5c023156 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:52.638546083 +0000 UTC m=+1593.429263231 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data") pod "rabbitmq-server-0" (UID: "9940cbe6-c323-4320-9e45-463e5c023156") : configmap "rabbitmq-config-data" not found Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.638656 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.639060 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.639125 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.649658 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-755bdc5489-xpxxn"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.650177 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.660092 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.686112 5010 scope.go:117] "RemoveContainer" containerID="e229a1e989382ba2e318edc94ab0085d0e9c7960c16530826bd38391caf86e02" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.694677 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.699962 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e618fcce-218b-4f09-a0ae-5cad873d9aab-kube-api-access-m6cdk" (OuterVolumeSpecName: "kube-api-access-m6cdk") pod "e618fcce-218b-4f09-a0ae-5cad873d9aab" (UID: "e618fcce-218b-4f09-a0ae-5cad873d9aab"). InnerVolumeSpecName "kube-api-access-m6cdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.712490 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1afd71d7-914c-4e41-b04f-0325049fa972-kube-api-access-dpp8m" (OuterVolumeSpecName: "kube-api-access-dpp8m") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). 
InnerVolumeSpecName "kube-api-access-dpp8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.734902 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-nbrh7"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.742016 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dpp8m\" (UniqueName: \"kubernetes.io/projected/1afd71d7-914c-4e41-b04f-0325049fa972-kube-api-access-dpp8m\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.742048 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.742057 5010 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.742066 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.742075 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6cdk\" (UniqueName: \"kubernetes.io/projected/e618fcce-218b-4f09-a0ae-5cad873d9aab-kube-api-access-m6cdk\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.742083 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1afd71d7-914c-4e41-b04f-0325049fa972-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.747487 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-nbrh7"] Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.767994 5010 scope.go:117] "RemoveContainer" containerID="e30a58057f8e14429694a2b07ec64cfe7a7ea07313dd194b06c05df065dded6f" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.785478 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "mysql-db") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.840226 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e911-account-delete-8hd9j" event={"ID":"35439472-3a5f-450f-9fcc-2a739253ad5b","Type":"ContainerStarted","Data":"8202821a7dfc286f3e5c7c43dbfc8afb10f37817294030b59909cdc1809690c9"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.846240 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.853635 5010 generic.go:334] "Generic (PLEG): container finished" podID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerID="4248baa6552f5c6e89014c848cb17cfc27c39511119002d3f8686be69996f6fe" exitCode=0 Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.853751 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" event={"ID":"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc","Type":"ContainerDied","Data":"4248baa6552f5c6e89014c848cb17cfc27c39511119002d3f8686be69996f6fe"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.860659 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican920e-account-delete-bv9zw" event={"ID":"0a45c0f6-649b-4b48-8245-4f70da1c3a4f","Type":"ContainerStarted","Data":"6baafe4a5f8c80877726c698f3d543e68971f894d1282f36e20f9a48993d1572"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.887236 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican920e-account-delete-bv9zw" podStartSLOduration=4.887217083 podStartE2EDuration="4.887217083s" podCreationTimestamp="2025-11-26 15:52:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:52:48.879116341 +0000 UTC m=+1589.669833489" watchObservedRunningTime="2025-11-26 15:52:48.887217083 +0000 UTC m=+1589.677934231" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.895241 5010 generic.go:334] "Generic (PLEG): container finished" podID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerID="a86003926de01550b467b33cbf762fa3bc24eb67a06d8b70ca85b43666377672" exitCode=0 Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.895314 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-546d9f9b4-87p6s" event={"ID":"9651251a-a0b2-4db8-bb82-b22a707bd7ab","Type":"ContainerDied","Data":"a86003926de01550b467b33cbf762fa3bc24eb67a06d8b70ca85b43666377672"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.908584 5010 generic.go:334] "Generic (PLEG): container finished" podID="e8c11462-1366-4e0f-9003-6079b25c6b04" containerID="dfeaf06140981180f1a1a2e0c95ed0d76c067b94bc7a96a4d1dbe2f41d09225e" exitCode=0 Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.908645 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement0cf4-account-delete-xrw9x" event={"ID":"e8c11462-1366-4e0f-9003-6079b25c6b04","Type":"ContainerDied","Data":"dfeaf06140981180f1a1a2e0c95ed0d76c067b94bc7a96a4d1dbe2f41d09225e"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.932582 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0180fc92-954c-4857-9caf-4b4e5ca0c214","Type":"ContainerDied","Data":"8f282fec68d3dec5399eeba24be7a5eff6434de0c82f418a5de8ca79565c4637"} Nov 26 15:52:48 
crc kubenswrapper[5010]: I1126 15:52:48.932762 5010 scope.go:117] "RemoveContainer" containerID="481fb5fc62e4c26b123d374b9f9cf253a34458e076080fc635b7a5858df4367e" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.932889 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.961164 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"e618fcce-218b-4f09-a0ae-5cad873d9aab","Type":"ContainerDied","Data":"d5d1fa08257d3909af44a34f881df9f2a14ad0167e2dcc341fe242a4a0df45dd"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.961169 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.984835 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4616-account-delete-ktdvj" event={"ID":"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5","Type":"ContainerStarted","Data":"d3a46fe789b64f7f7e31ca73390ee26d342047117f10028a576f19fb76dfcedd"} Nov 26 15:52:48 crc kubenswrapper[5010]: I1126 15:52:48.984877 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4616-account-delete-ktdvj" event={"ID":"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5","Type":"ContainerStarted","Data":"069ff544aa7da159d181cbe236cc40ec44ce54436f44326fbc2f3504dd7cd0e0"} Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.006878 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron4616-account-delete-ktdvj" podStartSLOduration=5.006864416 podStartE2EDuration="5.006864416s" podCreationTimestamp="2025-11-26 15:52:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:52:49.006103557 +0000 UTC m=+1589.796820705" watchObservedRunningTime="2025-11-26 15:52:49.006864416 +0000 UTC m=+1589.797581564" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.008158 5010 generic.go:334] "Generic (PLEG): container finished" podID="1afd71d7-914c-4e41-b04f-0325049fa972" containerID="fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623" exitCode=0 Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.008235 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.016197 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.016490 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1afd71d7-914c-4e41-b04f-0325049fa972","Type":"ContainerDied","Data":"fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623"} Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.016534 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"1afd71d7-914c-4e41-b04f-0325049fa972","Type":"ContainerDied","Data":"bde65109f64657d34decd49aca5e3b7a212c01cc6bf169fe8c9cdcb366ada8ce"} Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.033925 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder3420-account-delete-8w2px" podStartSLOduration=5.033883982 podStartE2EDuration="5.033883982s" podCreationTimestamp="2025-11-26 15:52:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:52:49.028073117 +0000 UTC m=+1589.818790265" watchObservedRunningTime="2025-11-26 15:52:49.033883982 +0000 UTC m=+1589.824601130" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.056179 5010 scope.go:117] "RemoveContainer" containerID="9d2743e44261566a3d1c21cfb428ead84b6cb4a37eb99da6c1bacd18a1287645" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.102464 5010 scope.go:117] "RemoveContainer" containerID="326a0e69015f09983f8703d9758f4b2d20607b2b6caf77a2247c63a4d0a164fa" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.112146 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.113943 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.136027 5010 scope.go:117] "RemoveContainer" containerID="fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.172816 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e618fcce-218b-4f09-a0ae-5cad873d9aab" (UID: "e618fcce-218b-4f09-a0ae-5cad873d9aab"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.173928 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.173947 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.173957 5010 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.180413 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.196784 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.204465 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.170:8080/healthcheck\": dial tcp 10.217.0.170:8080: connect: connection refused" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.204553 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.170:8080/healthcheck\": dial tcp 10.217.0.170:8080: connect: connection refused" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.281994 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-config-data" (OuterVolumeSpecName: "config-data") pod "e618fcce-218b-4f09-a0ae-5cad873d9aab" (UID: "e618fcce-218b-4f09-a0ae-5cad873d9aab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.307443 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0180fc92-954c-4857-9caf-4b4e5ca0c214" (UID: "0180fc92-954c-4857-9caf-4b4e5ca0c214"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.311982 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "e618fcce-218b-4f09-a0ae-5cad873d9aab" (UID: "e618fcce-218b-4f09-a0ae-5cad873d9aab"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.314454 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-nova-novncproxy-tls-certs" (OuterVolumeSpecName: "nova-novncproxy-tls-certs") pod "e618fcce-218b-4f09-a0ae-5cad873d9aab" (UID: "e618fcce-218b-4f09-a0ae-5cad873d9aab"). InnerVolumeSpecName "nova-novncproxy-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.342119 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1afd71d7-914c-4e41-b04f-0325049fa972" (UID: "1afd71d7-914c-4e41-b04f-0325049fa972"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.365569 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data" (OuterVolumeSpecName: "config-data") pod "0180fc92-954c-4857-9caf-4b4e5ca0c214" (UID: "0180fc92-954c-4857-9caf-4b4e5ca0c214"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.381890 5010 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.381940 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.381955 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.381966 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0180fc92-954c-4857-9caf-4b4e5ca0c214-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.381979 5010 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/e618fcce-218b-4f09-a0ae-5cad873d9aab-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.381993 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1afd71d7-914c-4e41-b04f-0325049fa972-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.398372 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": read tcp 10.217.0.2:59132->10.217.0.207:8775: read: connection reset by peer" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.399946 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" 
podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": read tcp 10.217.0.2:59122->10.217.0.207:8775: read: connection reset by peer" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.630938 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-587c687588-ztm89" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:46590->10.217.0.161:9311: read: connection reset by peer" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.630997 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-587c687588-ztm89" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.161:9311/healthcheck\": read tcp 10.217.0.2:46592->10.217.0.161:9311: read: connection reset by peer" Nov 26 15:52:49 crc kubenswrapper[5010]: E1126 15:52:49.655172 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37e7e487_28ea_405b_a645_a85aa94e12d2.slice/crio-conmon-a6ad69ad2f275b095e988e7b73cf40bc7e5931361b0c0f62610c61efb5efc0da.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod92c26092_3d97_417f_aaa7_48723d6c88be.slice/crio-conmon-8b5f9be0c133e2c0d365af8abb3b23cff3165b9fc4853de720fd9f31b4b01e06.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2e8dfd8a_0624_4f78_8c35_c6710328de9d.slice/crio-130cd73fcbfef4ee3a96c354f8416b71c7b8fee2a7b71d13849287e7634e311c.scope\": RecentStats: unable to find data in memory cache]" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.928172 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" path="/var/lib/kubelet/pods/3261dde1-64a6-4fe7-851e-4a5754444fd0/volumes" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.934190 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" path="/var/lib/kubelet/pods/5780f988-6f45-4fdb-9a2b-f149c0499552/volumes" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.935213 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" path="/var/lib/kubelet/pods/776a1766-4e7d-4ea0-bd5b-18b6b352448a/volumes" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.938417 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" path="/var/lib/kubelet/pods/7e581b31-6b6d-4e32-8775-3446bcf717d9/volumes" Nov 26 15:52:49 crc kubenswrapper[5010]: I1126 15:52:49.939294 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" path="/var/lib/kubelet/pods/c9fea0e4-4e18-4d7e-9af0-fd46b742565c/volumes" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.037075 5010 generic.go:334] "Generic (PLEG): container finished" podID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerID="b34d4f722a91c454494472df034a2abe16ed2231c30fd284d1678d1f21a2a6d7" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.037110 5010 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" event={"ID":"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc","Type":"ContainerDied","Data":"b34d4f722a91c454494472df034a2abe16ed2231c30fd284d1678d1f21a2a6d7"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.037152 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" event={"ID":"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc","Type":"ContainerDied","Data":"be5084aac0c347562d904640aff0d31ca4f6e32ee6731847a4cbf3c42b295f02"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.037163 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be5084aac0c347562d904640aff0d31ca4f6e32ee6731847a4cbf3c42b295f02" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.050261 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.050914 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="sg-core" containerID="cri-o://f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.050990 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="proxy-httpd" containerID="cri-o://4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.051165 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-notification-agent" containerID="cri-o://573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.053883 5010 generic.go:334] "Generic (PLEG): container finished" podID="35439472-3a5f-450f-9fcc-2a739253ad5b" containerID="1df44582fa6209a465f2fb40008f0c5b3dc20f374d51a23ca41e9704deffffd1" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.053934 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e911-account-delete-8hd9j" event={"ID":"35439472-3a5f-450f-9fcc-2a739253ad5b","Type":"ContainerDied","Data":"1df44582fa6209a465f2fb40008f0c5b3dc20f374d51a23ca41e9704deffffd1"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.054067 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-central-agent" containerID="cri-o://6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.064283 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.077928 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.078144 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" containerName="kube-state-metrics" containerID="cri-o://1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.082513 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.085916 5010 generic.go:334] "Generic (PLEG): container finished" podID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerID="118a80403c8effe28594f56bbbae9975efb6bb4ecc9f75c9df702170fd76f085" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.094631 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.085977 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-587c687588-ztm89" event={"ID":"0e65ad49-eec3-460d-aa80-0880c5e2e86b","Type":"ContainerDied","Data":"118a80403c8effe28594f56bbbae9975efb6bb4ecc9f75c9df702170fd76f085"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.114413 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.114590 5010 generic.go:334] "Generic (PLEG): container finished" podID="92c26092-3d97-417f-aaa7-48723d6c88be" containerID="8b5f9be0c133e2c0d365af8abb3b23cff3165b9fc4853de720fd9f31b4b01e06" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.114647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"92c26092-3d97-417f-aaa7-48723d6c88be","Type":"ContainerDied","Data":"8b5f9be0c133e2c0d365af8abb3b23cff3165b9fc4853de720fd9f31b4b01e06"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.114696 5010 scope.go:117] "RemoveContainer" containerID="55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.115072 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-config-data\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.117418 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-scripts\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.117470 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-internal-tls-certs\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 
15:52:50.117510 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9651251a-a0b2-4db8-bb82-b22a707bd7ab-logs\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.117561 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-combined-ca-bundle\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.117603 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nn9td\" (UniqueName: \"kubernetes.io/projected/9651251a-a0b2-4db8-bb82-b22a707bd7ab-kube-api-access-nn9td\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.117652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-public-tls-certs\") pod \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\" (UID: \"9651251a-a0b2-4db8-bb82-b22a707bd7ab\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.129814 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9651251a-a0b2-4db8-bb82-b22a707bd7ab-kube-api-access-nn9td" (OuterVolumeSpecName: "kube-api-access-nn9td") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "kube-api-access-nn9td". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.129987 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9651251a-a0b2-4db8-bb82-b22a707bd7ab-logs" (OuterVolumeSpecName: "logs") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.134468 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-scripts" (OuterVolumeSpecName: "scripts") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.137169 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.138480 5010 generic.go:334] "Generic (PLEG): container finished" podID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerID="e14aef587918296a922d16942a038b94eb34c104faed82cb3cae2790e3e19fba" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.138564 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe931cd2-6e31-4e82-a617-f028019a60c4","Type":"ContainerDied","Data":"e14aef587918296a922d16942a038b94eb34c104faed82cb3cae2790e3e19fba"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.146995 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.154580 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/kube-state-metrics-0" podUID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" containerName="kube-state-metrics" probeResult="failure" output="Get \"https://10.217.0.200:8081/readyz\": dial tcp 10.217.0.200:8081: connect: connection refused" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.158364 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-546d9f9b4-87p6s" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.158420 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-546d9f9b4-87p6s" event={"ID":"9651251a-a0b2-4db8-bb82-b22a707bd7ab","Type":"ContainerDied","Data":"7988dac9d01369a5dd02344b2c07e802c64f5747e521359b11858e905402a6fc"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.177061 5010 generic.go:334] "Generic (PLEG): container finished" podID="ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" containerID="d3a46fe789b64f7f7e31ca73390ee26d342047117f10028a576f19fb76dfcedd" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.177259 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.177285 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4616-account-delete-ktdvj" event={"ID":"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5","Type":"ContainerDied","Data":"d3a46fe789b64f7f7e31ca73390ee26d342047117f10028a576f19fb76dfcedd"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.189808 5010 generic.go:334] "Generic (PLEG): container finished" podID="37e7e487-28ea-405b-a645-a85aa94e12d2" containerID="a6ad69ad2f275b095e988e7b73cf40bc7e5931361b0c0f62610c61efb5efc0da" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.189886 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif128-account-delete-msqb5" event={"ID":"37e7e487-28ea-405b-a645-a85aa94e12d2","Type":"ContainerDied","Data":"a6ad69ad2f275b095e988e7b73cf40bc7e5931361b0c0f62610c61efb5efc0da"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.200472 5010 scope.go:117] "RemoveContainer" containerID="fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.201323 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623\": container with ID starting with fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623 not found: ID does not exist" 
containerID="fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.201359 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623"} err="failed to get container status \"fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623\": rpc error: code = NotFound desc = could not find container \"fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623\": container with ID starting with fb920c22afc927f70c1947b099f094bd4ae7c30731653714df3ba73fa20f1623 not found: ID does not exist" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.201380 5010 scope.go:117] "RemoveContainer" containerID="55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.206423 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396\": container with ID starting with 55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396 not found: ID does not exist" containerID="55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.206494 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396"} err="failed to get container status \"55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396\": rpc error: code = NotFound desc = could not find container \"55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396\": container with ID starting with 55c18f4f9d14c05d20ef6ef81f28d8a5e3256f4a2e88836e16c68429393eb396 not found: ID does not exist" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.206563 5010 scope.go:117] "RemoveContainer" containerID="a86003926de01550b467b33cbf762fa3bc24eb67a06d8b70ca85b43666377672" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.208267 5010 generic.go:334] "Generic (PLEG): container finished" podID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerID="9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.208347 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37d52190-a61c-44fb-9c9c-7966bd00e2c8","Type":"ContainerDied","Data":"9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.208374 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"37d52190-a61c-44fb-9c9c-7966bd00e2c8","Type":"ContainerDied","Data":"f6e18794f4e63dd4f476d10515df3ff32d55d7ea4d051925cb43bda9cf16489e"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.208438 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.212602 5010 generic.go:334] "Generic (PLEG): container finished" podID="0a45c0f6-649b-4b48-8245-4f70da1c3a4f" containerID="6baafe4a5f8c80877726c698f3d543e68971f894d1282f36e20f9a48993d1572" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.212652 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican920e-account-delete-bv9zw" event={"ID":"0a45c0f6-649b-4b48-8245-4f70da1c3a4f","Type":"ContainerDied","Data":"6baafe4a5f8c80877726c698f3d543e68971f894d1282f36e20f9a48993d1572"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.218836 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-combined-ca-bundle\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.218890 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-internal-tls-certs\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.218946 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.218969 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8m6c\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-kube-api-access-v8m6c\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.218993 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219012 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nq42x\" (UniqueName: \"kubernetes.io/projected/228e9671-d3dc-45dd-b200-7496327ebcda-kube-api-access-nq42x\") pod \"228e9671-d3dc-45dd-b200-7496327ebcda\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219081 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/228e9671-d3dc-45dd-b200-7496327ebcda-logs\") pod \"228e9671-d3dc-45dd-b200-7496327ebcda\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-httpd-run\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219157 5010 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-run-httpd\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219175 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-logs\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219196 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-log-httpd\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219214 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-etc-swift\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219241 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-config-data\") pod \"228e9671-d3dc-45dd-b200-7496327ebcda\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219270 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs\") pod \"228e9671-d3dc-45dd-b200-7496327ebcda\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219304 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-config-data\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219322 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-public-tls-certs\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219345 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-combined-ca-bundle\") pod \"228e9671-d3dc-45dd-b200-7496327ebcda\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219366 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6d45c\" (UniqueName: \"kubernetes.io/projected/e8c11462-1366-4e0f-9003-6079b25c6b04-kube-api-access-6d45c\") pod \"e8c11462-1366-4e0f-9003-6079b25c6b04\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219407 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8c11462-1366-4e0f-9003-6079b25c6b04-operator-scripts\") pod \"e8c11462-1366-4e0f-9003-6079b25c6b04\" (UID: \"e8c11462-1366-4e0f-9003-6079b25c6b04\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219423 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-combined-ca-bundle\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219472 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnfsv\" (UniqueName: \"kubernetes.io/projected/37d52190-a61c-44fb-9c9c-7966bd00e2c8-kube-api-access-tnfsv\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219488 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-config-data\") pod \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\" (UID: \"3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.219503 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-public-tls-certs\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.244745 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nn9td\" (UniqueName: \"kubernetes.io/projected/9651251a-a0b2-4db8-bb82-b22a707bd7ab-kube-api-access-nn9td\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.244796 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.244808 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9651251a-a0b2-4db8-bb82-b22a707bd7ab-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.279208 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37d52190-a61c-44fb-9c9c-7966bd00e2c8-kube-api-access-tnfsv" (OuterVolumeSpecName: "kube-api-access-tnfsv") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "kube-api-access-tnfsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.280477 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.280721 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.280967 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-logs" (OuterVolumeSpecName: "logs") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.281266 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.282009 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8c11462-1366-4e0f-9003-6079b25c6b04-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8c11462-1366-4e0f-9003-6079b25c6b04" (UID: "e8c11462-1366-4e0f-9003-6079b25c6b04"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.283216 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/228e9671-d3dc-45dd-b200-7496327ebcda-logs" (OuterVolumeSpecName: "logs") pod "228e9671-d3dc-45dd-b200-7496327ebcda" (UID: "228e9671-d3dc-45dd-b200-7496327ebcda"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.283307 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.290149 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "glance") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.296156 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-kube-api-access-v8m6c" (OuterVolumeSpecName: "kube-api-access-v8m6c") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "kube-api-access-v8m6c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.296484 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8c11462-1366-4e0f-9003-6079b25c6b04-kube-api-access-6d45c" (OuterVolumeSpecName: "kube-api-access-6d45c") pod "e8c11462-1366-4e0f-9003-6079b25c6b04" (UID: "e8c11462-1366-4e0f-9003-6079b25c6b04"). InnerVolumeSpecName "kube-api-access-6d45c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.303078 5010 generic.go:334] "Generic (PLEG): container finished" podID="7e02370f-1b63-47f7-8d66-ba7c94310c38" containerID="91d30cd917e1e2e4008b14f3788854cb37b47aec0ecb90baaa45b3f45936f4c7" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.303172 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder3420-account-delete-8w2px" event={"ID":"7e02370f-1b63-47f7-8d66-ba7c94310c38","Type":"ContainerDied","Data":"91d30cd917e1e2e4008b14f3788854cb37b47aec0ecb90baaa45b3f45936f4c7"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.312529 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.327103 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.327208 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/228e9671-d3dc-45dd-b200-7496327ebcda-kube-api-access-nq42x" (OuterVolumeSpecName: "kube-api-access-nq42x") pod "228e9671-d3dc-45dd-b200-7496327ebcda" (UID: "228e9671-d3dc-45dd-b200-7496327ebcda"). InnerVolumeSpecName "kube-api-access-nq42x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.327766 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.341213 5010 generic.go:334] "Generic (PLEG): container finished" podID="228e9671-d3dc-45dd-b200-7496327ebcda" containerID="ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.341294 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"228e9671-d3dc-45dd-b200-7496327ebcda","Type":"ContainerDied","Data":"ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.341324 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"228e9671-d3dc-45dd-b200-7496327ebcda","Type":"ContainerDied","Data":"94b484d9d58e76a9c9df1ee1f29b1cceaf21e784e8b17d372c94070df020c504"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.341409 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.359891 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts" (OuterVolumeSpecName: "scripts") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.423575 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts\") pod \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\" (UID: \"37d52190-a61c-44fb-9c9c-7966bd00e2c8\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.423653 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lv6pw\" (UniqueName: \"kubernetes.io/projected/92c26092-3d97-417f-aaa7-48723d6c88be-kube-api-access-lv6pw\") pod \"92c26092-3d97-417f-aaa7-48723d6c88be\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.423775 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-combined-ca-bundle\") pod \"92c26092-3d97-417f-aaa7-48723d6c88be\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.423938 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-internal-tls-certs\") pod \"92c26092-3d97-417f-aaa7-48723d6c88be\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.423974 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-public-tls-certs\") pod \"92c26092-3d97-417f-aaa7-48723d6c88be\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.424044 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92c26092-3d97-417f-aaa7-48723d6c88be-logs\") pod \"92c26092-3d97-417f-aaa7-48723d6c88be\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.424088 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-config-data\") pod \"92c26092-3d97-417f-aaa7-48723d6c88be\" (UID: \"92c26092-3d97-417f-aaa7-48723d6c88be\") " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425404 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/228e9671-d3dc-45dd-b200-7496327ebcda-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425422 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425435 5010 reconciler_common.go:293] "Volume detached for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425443 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/37d52190-a61c-44fb-9c9c-7966bd00e2c8-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425472 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425483 5010 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425496 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6d45c\" (UniqueName: \"kubernetes.io/projected/e8c11462-1366-4e0f-9003-6079b25c6b04-kube-api-access-6d45c\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425505 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8c11462-1366-4e0f-9003-6079b25c6b04-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425514 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnfsv\" (UniqueName: \"kubernetes.io/projected/37d52190-a61c-44fb-9c9c-7966bd00e2c8-kube-api-access-tnfsv\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425559 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425569 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8m6c\" (UniqueName: \"kubernetes.io/projected/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-kube-api-access-v8m6c\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.425578 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nq42x\" (UniqueName: \"kubernetes.io/projected/228e9671-d3dc-45dd-b200-7496327ebcda-kube-api-access-nq42x\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.445290 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/92c26092-3d97-417f-aaa7-48723d6c88be-logs" (OuterVolumeSpecName: "logs") pod "92c26092-3d97-417f-aaa7-48723d6c88be" (UID: "92c26092-3d97-417f-aaa7-48723d6c88be"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: W1126 15:52:50.446227 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/37d52190-a61c-44fb-9c9c-7966bd00e2c8/volumes/kubernetes.io~secret/scripts Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.446256 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts" (OuterVolumeSpecName: "scripts") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.447131 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement0cf4-account-delete-xrw9x" event={"ID":"e8c11462-1366-4e0f-9003-6079b25c6b04","Type":"ContainerDied","Data":"9ecf757230d66a5baf0e952cf87db376c67b755faf3e81855703e55b2ff4fae4"} Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.447328 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement0cf4-account-delete-xrw9x" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.447419 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9ecf757230d66a5baf0e952cf87db376c67b755faf3e81855703e55b2ff4fae4" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.451126 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.451469 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.456101 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.460238 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.460528 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="6243a3e1-835d-4150-afea-1f2bb0032065" containerName="memcached" containerID="cri-o://592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.460985 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.462249 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.462316 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is 
running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.462377 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.463311 5010 generic.go:334] "Generic (PLEG): container finished" podID="2e8dfd8a-0624-4f78-8c35-c6710328de9d" containerID="130cd73fcbfef4ee3a96c354f8416b71c7b8fee2a7b71d13849287e7634e311c" exitCode=0 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.463382 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancec7b0-account-delete-9tpdl" event={"ID":"2e8dfd8a-0624-4f78-8c35-c6710328de9d","Type":"ContainerDied","Data":"130cd73fcbfef4ee3a96c354f8416b71c7b8fee2a7b71d13849287e7634e311c"} Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.464760 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.464815 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.505542 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/92c26092-3d97-417f-aaa7-48723d6c88be-kube-api-access-lv6pw" (OuterVolumeSpecName: "kube-api-access-lv6pw") pod "92c26092-3d97-417f-aaa7-48723d6c88be" (UID: "92c26092-3d97-417f-aaa7-48723d6c88be"). InnerVolumeSpecName "kube-api-access-lv6pw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.507953 5010 scope.go:117] "RemoveContainer" containerID="5333a0de78b475fd78f332fa0f32083caa1395fc128350a6a203fa02b8019334" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.517280 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.529950 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/92c26092-3d97-417f-aaa7-48723d6c88be-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.529980 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.529990 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lv6pw\" (UniqueName: \"kubernetes.io/projected/92c26092-3d97-417f-aaa7-48723d6c88be-kube-api-access-lv6pw\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.544646 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-58b6s"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.550868 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.553971 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-config-data" (OuterVolumeSpecName: "config-data") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.558479 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-vmqg9"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.578951 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-58b6s"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.590679 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.594033 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-vmqg9"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.609583 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-config-data" (OuterVolumeSpecName: "config-data") pod "228e9671-d3dc-45dd-b200-7496327ebcda" (UID: "228e9671-d3dc-45dd-b200-7496327ebcda"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.623409 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystoneebd8-account-delete-2ghcn"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.623986 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624009 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-log" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624020 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="ovsdbserver-sb" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624028 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="ovsdbserver-sb" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624044 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624051 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624076 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8c11462-1366-4e0f-9003-6079b25c6b04" containerName="mariadb-account-delete" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624083 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8c11462-1366-4e0f-9003-6079b25c6b04" containerName="mariadb-account-delete" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624093 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624100 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-log" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624108 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624116 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624125 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-api" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624131 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-api" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624139 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" containerName="galera" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624145 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" containerName="galera" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624154 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="probe" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624162 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="probe" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624170 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-api" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624178 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-api" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624188 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="ovn-northd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624195 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="ovn-northd" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624207 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" containerName="mysql-bootstrap" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624213 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" containerName="mysql-bootstrap" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624228 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624235 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624248 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e618fcce-218b-4f09-a0ae-5cad873d9aab" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624254 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e618fcce-218b-4f09-a0ae-5cad873d9aab" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624264 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerName="dnsmasq-dns" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624270 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerName="dnsmasq-dns" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624276 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-metadata" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624282 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-metadata" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624292 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624298 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 
15:52:50.624311 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerName="init" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624316 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerName="init" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624330 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-httpd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624336 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-httpd" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624344 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="ovsdbserver-nb" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624350 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="ovsdbserver-nb" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624381 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-httpd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624389 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-httpd" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624401 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624407 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-log" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624416 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="cinder-scheduler" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624423 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="cinder-scheduler" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624434 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624440 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-log" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.624448 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-server" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624454 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-server" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624646 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="ovsdbserver-sb" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624659 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e581b31-6b6d-4e32-8775-3446bcf717d9" containerName="dnsmasq-dns" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624666 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-api" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624674 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624683 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-httpd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624696 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8c11462-1366-4e0f-9003-6079b25c6b04" containerName="mariadb-account-delete" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624702 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5780f988-6f45-4fdb-9a2b-f149c0499552" containerName="ovsdbserver-nb" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624727 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" containerName="nova-api-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624739 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="ovn-northd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624749 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624761 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e618fcce-218b-4f09-a0ae-5cad873d9aab" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624769 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" containerName="galera" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624777 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624784 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" containerName="placement-api" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624795 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="776a1766-4e7d-4ea0-bd5b-18b6b352448a" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624805 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" containerName="glance-httpd" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624811 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-metadata" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624825 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" containerName="nova-metadata-log" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624841 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3261dde1-64a6-4fe7-851e-4a5754444fd0" containerName="ovn-controller" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624853 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="probe" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624863 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" containerName="cinder-scheduler" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624875 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" containerName="proxy-server" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.624888 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9fea0e4-4e18-4d7e-9af0-fd46b742565c" containerName="openstack-network-exporter" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.625559 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.632484 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.632514 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.632545 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.632555 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.659814 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.666019 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-config-data" (OuterVolumeSpecName: "config-data") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.681064 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7b9b5b699d-rh4fw"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.681330 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-7b9b5b699d-rh4fw" podUID="d6093731-a529-4e5b-94bd-4948ab30cedc" containerName="keystone-api" containerID="cri-o://b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d" gracePeriod=30 Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.681813 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "228e9671-d3dc-45dd-b200-7496327ebcda" (UID: "228e9671-d3dc-45dd-b200-7496327ebcda"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.712127 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystoneebd8-account-delete-2ghcn"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.718906 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.734047 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.734140 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2drnz\" (UniqueName: \"kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.734202 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.734215 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.734228 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.749793 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-pnnp6"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.759042 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystoneebd8-account-delete-2ghcn"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.765278 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ebd8-account-create-update-t7wlc"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.771398 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-pnnp6"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.778195 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-ebd8-account-create-update-t7wlc"] Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.806442 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "92c26092-3d97-417f-aaa7-48723d6c88be" (UID: "92c26092-3d97-417f-aaa7-48723d6c88be"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.811007 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.835775 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2drnz\" (UniqueName: \"kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.836167 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.836397 5010 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.836455 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts podName:4941e4a7-7638-4a30-91b2-73b25ead50c2 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:51.336436793 +0000 UTC m=+1592.127153941 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts") pod "keystoneebd8-account-delete-2ghcn" (UID: "4941e4a7-7638-4a30-91b2-73b25ead50c2") : configmap "openstack-scripts" not found Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.837436 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.837492 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.841359 5010 projected.go:194] Error preparing data for projected volume kube-api-access-2drnz for pod openstack/keystoneebd8-account-delete-2ghcn: failed to fetch token: serviceaccounts "galera-openstack" not found Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.841448 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz podName:4941e4a7-7638-4a30-91b2-73b25ead50c2 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:51.341432968 +0000 UTC m=+1592.132150116 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2drnz" (UniqueName: "kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz") pod "keystoneebd8-account-delete-2ghcn" (UID: "4941e4a7-7638-4a30-91b2-73b25ead50c2") : failed to fetch token: serviceaccounts "galera-openstack" not found Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.847286 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-config-data" (OuterVolumeSpecName: "config-data") pod "92c26092-3d97-417f-aaa7-48723d6c88be" (UID: "92c26092-3d97-417f-aaa7-48723d6c88be"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.857178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "92c26092-3d97-417f-aaa7-48723d6c88be" (UID: "92c26092-3d97-417f-aaa7-48723d6c88be"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.858180 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.859449 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.860540 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.860566 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="1cfc9265-de84-4047-9e01-69444aa4d9f5" containerName="nova-cell1-conductor-conductor" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.861010 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.885014 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.888346 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.898434 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "92c26092-3d97-417f-aaa7-48723d6c88be" (UID: "92c26092-3d97-417f-aaa7-48723d6c88be"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.904265 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.165:8776/healthcheck\": dial tcp 10.217.0.165:8776: connect: connection refused" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.904428 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" (UID: "3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.930089 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.930273 5010 scope.go:117] "RemoveContainer" containerID="9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.930472 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.935900 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-config-data" (OuterVolumeSpecName: "config-data") pod "37d52190-a61c-44fb-9c9c-7966bd00e2c8" (UID: "37d52190-a61c-44fb-9c9c-7966bd00e2c8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.938147 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "228e9671-d3dc-45dd-b200-7496327ebcda" (UID: "228e9671-d3dc-45dd-b200-7496327ebcda"). 
InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.938282 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs\") pod \"228e9671-d3dc-45dd-b200-7496327ebcda\" (UID: \"228e9671-d3dc-45dd-b200-7496327ebcda\") " Nov 26 15:52:50 crc kubenswrapper[5010]: W1126 15:52:50.938829 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/228e9671-d3dc-45dd-b200-7496327ebcda/volumes/kubernetes.io~secret/nova-metadata-tls-certs Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.938848 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "228e9671-d3dc-45dd-b200-7496327ebcda" (UID: "228e9671-d3dc-45dd-b200-7496327ebcda"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939121 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939143 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939153 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939163 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939172 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/228e9671-d3dc-45dd-b200-7496327ebcda-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939182 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37d52190-a61c-44fb-9c9c-7966bd00e2c8-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939191 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939201 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.939209 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/92c26092-3d97-417f-aaa7-48723d6c88be-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:50 crc kubenswrapper[5010]: E1126 15:52:50.955502 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-2drnz operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystoneebd8-account-delete-2ghcn" podUID="4941e4a7-7638-4a30-91b2-73b25ead50c2" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.962725 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.994015 5010 scope.go:117] "RemoveContainer" containerID="1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045" Nov 26 15:52:50 crc kubenswrapper[5010]: I1126 15:52:50.996486 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "9651251a-a0b2-4db8-bb82-b22a707bd7ab" (UID: "9651251a-a0b2-4db8-bb82-b22a707bd7ab"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.015385 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="galera" containerID="cri-o://8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" gracePeriod=30 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.029271 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.030116 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.037098 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.042833 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-combined-ca-bundle\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.042879 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-scripts\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.042943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-public-tls-certs\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043018 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043088 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-logs\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043119 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-internal-tls-certs\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-internal-tls-certs\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043213 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2tzpc\" (UniqueName: \"kubernetes.io/projected/fe931cd2-6e31-4e82-a617-f028019a60c4-kube-api-access-2tzpc\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043268 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data-custom\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043297 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-combined-ca-bundle\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043333 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-config-data\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043372 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-httpd-run\") pod \"fe931cd2-6e31-4e82-a617-f028019a60c4\" (UID: \"fe931cd2-6e31-4e82-a617-f028019a60c4\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043407 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e65ad49-eec3-460d-aa80-0880c5e2e86b-logs\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043459 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwr8w\" (UniqueName: \"kubernetes.io/projected/0e65ad49-eec3-460d-aa80-0880c5e2e86b-kube-api-access-hwr8w\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.043486 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data\") pod \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\" (UID: \"0e65ad49-eec3-460d-aa80-0880c5e2e86b\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.044067 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9651251a-a0b2-4db8-bb82-b22a707bd7ab-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.050179 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.052738 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e65ad49-eec3-460d-aa80-0880c5e2e86b-logs" (OuterVolumeSpecName: "logs") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.054509 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-logs" (OuterVolumeSpecName: "logs") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.054857 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.056182 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe931cd2-6e31-4e82-a617-f028019a60c4-kube-api-access-2tzpc" (OuterVolumeSpecName: "kube-api-access-2tzpc") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "kube-api-access-2tzpc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.060944 5010 scope.go:117] "RemoveContainer" containerID="9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.063329 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b\": container with ID starting with 9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b not found: ID does not exist" containerID="9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.063463 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b"} err="failed to get container status \"9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b\": rpc error: code = NotFound desc = could not find container \"9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b\": container with ID starting with 9f145da2e73761e9c2ccd4617b0d93117a9c926562fe07bc7a82554cb724989b not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.063566 5010 scope.go:117] "RemoveContainer" containerID="1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.065381 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045\": container with ID starting with 1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045 not found: ID does not exist" containerID="1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.065470 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045"} err="failed to get container status \"1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045\": rpc error: code = NotFound desc = could not find container \"1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045\": container with ID starting with 1b1d6322639a18e547771fd7481b4eb498b94331fbb856585503322d6a260045 not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.065550 5010 scope.go:117] "RemoveContainer" 
containerID="ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.074006 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e65ad49-eec3-460d-aa80-0880c5e2e86b-kube-api-access-hwr8w" (OuterVolumeSpecName: "kube-api-access-hwr8w") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "kube-api-access-hwr8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.074439 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-scripts" (OuterVolumeSpecName: "scripts") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.094001 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.108453 5010 scope.go:117] "RemoveContainer" containerID="c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.118564 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.122570 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-config-data" (OuterVolumeSpecName: "config-data") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.123045 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.123240 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.135117 5010 scope.go:117] "RemoveContainer" containerID="ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.135682 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb\": container with ID starting with ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb not found: ID does not exist" containerID="ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.135745 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb"} err="failed to get container status \"ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb\": rpc error: code = NotFound desc = could not find container \"ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb\": container with ID starting with ccb6d1eb398b4844d5f68c2ca82a34d6f07df03d5a2acc9b89a644456da536fb not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.135774 5010 scope.go:117] "RemoveContainer" containerID="c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.136385 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data" (OuterVolumeSpecName: "config-data") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.136605 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe931cd2-6e31-4e82-a617-f028019a60c4" (UID: "fe931cd2-6e31-4e82-a617-f028019a60c4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.136908 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a\": container with ID starting with c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a not found: ID does not exist" containerID="c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.136953 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a"} err="failed to get container status \"c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a\": rpc error: code = NotFound desc = could not find container \"c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a\": container with ID starting with c6b7b0eaa9f5e69fa5c44e5fcf34d05ef221f36ff845e1902e310749e6f1c69a not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145047 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data\") pod \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145115 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pdvl\" (UniqueName: \"kubernetes.io/projected/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-api-access-9pdvl\") pod \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145159 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-combined-ca-bundle\") pod \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145212 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8z8v\" (UniqueName: \"kubernetes.io/projected/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-kube-api-access-w8z8v\") pod \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145261 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-combined-ca-bundle\") pod \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145318 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-config\") pod \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145351 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-certs\") pod \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\" (UID: \"ed209eb8-b2b9-4101-9eda-2762259ea2cd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145380 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-logs\") pod \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145400 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data-custom\") pod \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\" (UID: \"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145775 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145805 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145818 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145829 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145839 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2tzpc\" (UniqueName: \"kubernetes.io/projected/fe931cd2-6e31-4e82-a617-f028019a60c4-kube-api-access-2tzpc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145849 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145857 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145866 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145875 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe931cd2-6e31-4e82-a617-f028019a60c4-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145883 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0e65ad49-eec3-460d-aa80-0880c5e2e86b-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc 
kubenswrapper[5010]: I1126 15:52:51.145892 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwr8w\" (UniqueName: \"kubernetes.io/projected/0e65ad49-eec3-460d-aa80-0880c5e2e86b-kube-api-access-hwr8w\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145903 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145911 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.145920 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe931cd2-6e31-4e82-a617-f028019a60c4-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.157615 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-logs" (OuterVolumeSpecName: "logs") pod "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" (UID: "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.157818 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0e65ad49-eec3-460d-aa80-0880c5e2e86b" (UID: "0e65ad49-eec3-460d-aa80-0880c5e2e86b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.168511 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-546d9f9b4-87p6s"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.169267 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.171151 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.178036 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-api-access-9pdvl" (OuterVolumeSpecName: "kube-api-access-9pdvl") pod "ed209eb8-b2b9-4101-9eda-2762259ea2cd" (UID: "ed209eb8-b2b9-4101-9eda-2762259ea2cd"). InnerVolumeSpecName "kube-api-access-9pdvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.183219 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" (UID: "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.185791 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.192631 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-kube-api-access-w8z8v" (OuterVolumeSpecName: "kube-api-access-w8z8v") pod "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" (UID: "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a"). InnerVolumeSpecName "kube-api-access-w8z8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.213264 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "ed209eb8-b2b9-4101-9eda-2762259ea2cd" (UID: "ed209eb8-b2b9-4101-9eda-2762259ea2cd"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.217816 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-546d9f9b4-87p6s"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.228428 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.234525 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed209eb8-b2b9-4101-9eda-2762259ea2cd" (UID: "ed209eb8-b2b9-4101-9eda-2762259ea2cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.246435 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.246528 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "ed209eb8-b2b9-4101-9eda-2762259ea2cd" (UID: "ed209eb8-b2b9-4101-9eda-2762259ea2cd"). InnerVolumeSpecName "kube-state-metrics-tls-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247467 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-logs\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247505 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247536 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-scripts\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247593 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-combined-ca-bundle\") pod \"9687c9f4-9131-4c43-a1f2-2faf3040e499\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247639 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-public-tls-certs\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247689 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-internal-tls-certs\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247743 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsxsl\" (UniqueName: \"kubernetes.io/projected/9687c9f4-9131-4c43-a1f2-2faf3040e499-kube-api-access-qsxsl\") pod \"9687c9f4-9131-4c43-a1f2-2faf3040e499\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247769 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9687c9f4-9131-4c43-a1f2-2faf3040e499-logs\") pod \"9687c9f4-9131-4c43-a1f2-2faf3040e499\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247822 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data\") pod \"9687c9f4-9131-4c43-a1f2-2faf3040e499\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247846 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-etc-machine-id\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 
15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247884 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mvqh\" (UniqueName: \"kubernetes.io/projected/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-kube-api-access-8mvqh\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247909 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-combined-ca-bundle\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.247955 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data-custom\") pod \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\" (UID: \"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.248008 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data-custom\") pod \"9687c9f4-9131-4c43-a1f2-2faf3040e499\" (UID: \"9687c9f4-9131-4c43-a1f2-2faf3040e499\") " Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.248403 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249912 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249933 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pdvl\" (UniqueName: \"kubernetes.io/projected/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-api-access-9pdvl\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249943 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249958 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8z8v\" (UniqueName: \"kubernetes.io/projected/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-kube-api-access-w8z8v\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249968 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e65ad49-eec3-460d-aa80-0880c5e2e86b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249977 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.249987 5010 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.250000 5010 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ed209eb8-b2b9-4101-9eda-2762259ea2cd-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.250012 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.250021 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.251145 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-logs" (OuterVolumeSpecName: "logs") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.256293 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" (UID: "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.256949 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9687c9f4-9131-4c43-a1f2-2faf3040e499-logs" (OuterVolumeSpecName: "logs") pod "9687c9f4-9131-4c43-a1f2-2faf3040e499" (UID: "9687c9f4-9131-4c43-a1f2-2faf3040e499"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.259666 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "9687c9f4-9131-4c43-a1f2-2faf3040e499" (UID: "9687c9f4-9131-4c43-a1f2-2faf3040e499"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.263134 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.263912 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9687c9f4-9131-4c43-a1f2-2faf3040e499-kube-api-access-qsxsl" (OuterVolumeSpecName: "kube-api-access-qsxsl") pod "9687c9f4-9131-4c43-a1f2-2faf3040e499" (UID: "9687c9f4-9131-4c43-a1f2-2faf3040e499"). InnerVolumeSpecName "kube-api-access-qsxsl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.264950 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-kube-api-access-8mvqh" (OuterVolumeSpecName: "kube-api-access-8mvqh") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "kube-api-access-8mvqh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.277317 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data" (OuterVolumeSpecName: "config-data") pod "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" (UID: "c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.287282 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-scripts" (OuterVolumeSpecName: "scripts") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.296366 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9687c9f4-9131-4c43-a1f2-2faf3040e499" (UID: "9687c9f4-9131-4c43-a1f2-2faf3040e499"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.311261 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.318412 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data" (OuterVolumeSpecName: "config-data") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.345565 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355118 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355840 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2drnz\" (UniqueName: \"kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355950 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355966 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355978 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355987 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsxsl\" (UniqueName: \"kubernetes.io/projected/9687c9f4-9131-4c43-a1f2-2faf3040e499-kube-api-access-qsxsl\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.355996 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9687c9f4-9131-4c43-a1f2-2faf3040e499-logs\") on node \"crc\" DevicePath \"\"" Nov 
26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356005 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mvqh\" (UniqueName: \"kubernetes.io/projected/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-kube-api-access-8mvqh\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356014 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356024 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356034 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356044 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356053 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356061 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-logs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.356069 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.355919 5010 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.357371 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts podName:4941e4a7-7638-4a30-91b2-73b25ead50c2 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:52.357350604 +0000 UTC m=+1593.148067752 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts") pod "keystoneebd8-account-delete-2ghcn" (UID: "4941e4a7-7638-4a30-91b2-73b25ead50c2") : configmap "openstack-scripts" not found Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.359463 5010 projected.go:194] Error preparing data for projected volume kube-api-access-2drnz for pod openstack/keystoneebd8-account-delete-2ghcn: failed to fetch token: serviceaccounts "galera-openstack" not found Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.359552 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz podName:4941e4a7-7638-4a30-91b2-73b25ead50c2 nodeName:}" failed. 
No retries permitted until 2025-11-26 15:52:52.359528519 +0000 UTC m=+1593.150245737 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-2drnz" (UniqueName: "kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz") pod "keystoneebd8-account-delete-2ghcn" (UID: "4941e4a7-7638-4a30-91b2-73b25ead50c2") : failed to fetch token: serviceaccounts "galera-openstack" not found Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.363471 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" (UID: "58a117e9-40a2-43bc-b52b-6bbfdd0f45dd"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.380837 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data" (OuterVolumeSpecName: "config-data") pod "9687c9f4-9131-4c43-a1f2-2faf3040e499" (UID: "9687c9f4-9131-4c43-a1f2-2faf3040e499"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.414512 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.105:5671: connect: connection refused" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.457391 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9687c9f4-9131-4c43-a1f2-2faf3040e499-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.457439 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.483527 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"92c26092-3d97-417f-aaa7-48723d6c88be","Type":"ContainerDied","Data":"673dbc71df9a4d6faf4dfc22583eacd90d47aa62c25e74b92b8eba49fec3ef5c"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.483581 5010 scope.go:117] "RemoveContainer" containerID="8b5f9be0c133e2c0d365af8abb3b23cff3165b9fc4853de720fd9f31b4b01e06" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.484029 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.486546 5010 generic.go:334] "Generic (PLEG): container finished" podID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" containerID="1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56" exitCode=2 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.486615 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ed209eb8-b2b9-4101-9eda-2762259ea2cd","Type":"ContainerDied","Data":"1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.486638 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ed209eb8-b2b9-4101-9eda-2762259ea2cd","Type":"ContainerDied","Data":"1fde3fb5e760b57b7690e7599878dc5640e2635c090d3aeb394a3d92b3322e48"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.486702 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.491013 5010 generic.go:334] "Generic (PLEG): container finished" podID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerID="1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904" exitCode=0 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.491060 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" event={"ID":"9687c9f4-9131-4c43-a1f2-2faf3040e499","Type":"ContainerDied","Data":"1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.491078 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" event={"ID":"9687c9f4-9131-4c43-a1f2-2faf3040e499","Type":"ContainerDied","Data":"86ed3de104751ba238c1775bc15e2c8bbb8db23a69c23d9e4ff0323e7d429630"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.491165 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-cd69b7494-nmz2d" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.500629 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe931cd2-6e31-4e82-a617-f028019a60c4","Type":"ContainerDied","Data":"1d09fefea6425292ffe509ee1e2b6e0ab4205ea5dd4513f4c904ddea49391dc6"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.500690 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.504266 5010 generic.go:334] "Generic (PLEG): container finished" podID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerID="7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822" exitCode=0 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.504325 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd","Type":"ContainerDied","Data":"7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.504346 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"58a117e9-40a2-43bc-b52b-6bbfdd0f45dd","Type":"ContainerDied","Data":"da3dc1e4dbe15fba67590315abbdb41393dc3b2b5648156ab5c3380328ea7b9c"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.504471 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.521033 5010 scope.go:117] "RemoveContainer" containerID="26f349f0d4a74599a92410c53237aaac653bda0a60be6f0aa87a4a0d24166ef0" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.522313 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-587c687588-ztm89" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.522323 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-587c687588-ztm89" event={"ID":"0e65ad49-eec3-460d-aa80-0880c5e2e86b","Type":"ContainerDied","Data":"72d83b24cb89b212c1c88444156a9749ee449365d3a65080c7e3957737694b2b"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.535166 5010 generic.go:334] "Generic (PLEG): container finished" podID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerID="4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a" exitCode=0 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.535195 5010 generic.go:334] "Generic (PLEG): container finished" podID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerID="f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c" exitCode=2 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.535203 5010 generic.go:334] "Generic (PLEG): container finished" podID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerID="6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b" exitCode=0 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.535234 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerDied","Data":"4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.535279 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerDied","Data":"f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.535289 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerDied","Data":"6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.544114 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerID="256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e" exitCode=0 Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.544204 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.544260 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" event={"ID":"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a","Type":"ContainerDied","Data":"256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.544293 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fbcbc6747-lkhxw" event={"ID":"c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a","Type":"ContainerDied","Data":"0d2fe0a9b060e3461b31c86db2c4b82939d2c7fdc590eb737fa7c3bacd3ec9ee"} Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.544329 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.544347 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-6d9f966b7c-7cbw2" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.559862 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-cd69b7494-nmz2d"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.575201 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-cd69b7494-nmz2d"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.584694 5010 scope.go:117] "RemoveContainer" containerID="1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.585755 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.614686 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.644694 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.660611 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.671163 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.697654 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.721148 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.728911 5010 scope.go:117] "RemoveContainer" containerID="1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.729387 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56\": container with ID starting with 1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56 not found: ID does not exist" containerID="1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.729433 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56"} err="failed to get container status \"1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56\": rpc error: code = NotFound desc = could not find container \"1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56\": container with ID starting with 1472d002ff29a6c01444915702f1c8f6d1c024be83e84cee0a9fc141bd2f3f56 not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.729466 5010 scope.go:117] "RemoveContainer" containerID="1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.732927 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.747997 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.761491 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-587c687588-ztm89"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.768348 5010 scope.go:117] "RemoveContainer" containerID="ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.770221 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-587c687588-ztm89"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.776862 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7fbcbc6747-lkhxw"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.782442 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-7fbcbc6747-lkhxw"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.787860 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-6d9f966b7c-7cbw2"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.792743 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-6d9f966b7c-7cbw2"] Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.794207 5010 scope.go:117] "RemoveContainer" 
containerID="1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.794620 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904\": container with ID starting with 1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904 not found: ID does not exist" containerID="1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.794659 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904"} err="failed to get container status \"1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904\": rpc error: code = NotFound desc = could not find container \"1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904\": container with ID starting with 1ae29154cc1a9c2d901ef43c27f670f57e17d275ff38747cdc28835c07b60904 not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.794681 5010 scope.go:117] "RemoveContainer" containerID="ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.794978 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e\": container with ID starting with ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e not found: ID does not exist" containerID="ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.795018 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e"} err="failed to get container status \"ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e\": rpc error: code = NotFound desc = could not find container \"ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e\": container with ID starting with ee3f8fe43cb064bc1e3f269047d26e208ff576de7e01698e2092c8f8debc121e not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.795050 5010 scope.go:117] "RemoveContainer" containerID="e14aef587918296a922d16942a038b94eb34c104faed82cb3cae2790e3e19fba" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.828039 5010 scope.go:117] "RemoveContainer" containerID="547cc5858c244164a45ddd7c0b27e3033da3950ba49796f26212abc8845b9246" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.855195 5010 scope.go:117] "RemoveContainer" containerID="7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.871275 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.871439 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data podName:a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:59.871371241 +0000 UTC m=+1600.662088389 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data") pod "rabbitmq-cell1-server-0" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25") : configmap "rabbitmq-cell1-config-data" not found Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.889397 5010 scope.go:117] "RemoveContainer" containerID="7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.905928 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0180fc92-954c-4857-9caf-4b4e5ca0c214" path="/var/lib/kubelet/pods/0180fc92-954c-4857-9caf-4b4e5ca0c214/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.907212 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" path="/var/lib/kubelet/pods/0e65ad49-eec3-460d-aa80-0880c5e2e86b/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.908155 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1afd71d7-914c-4e41-b04f-0325049fa972" path="/var/lib/kubelet/pods/1afd71d7-914c-4e41-b04f-0325049fa972/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.909908 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="228e9671-d3dc-45dd-b200-7496327ebcda" path="/var/lib/kubelet/pods/228e9671-d3dc-45dd-b200-7496327ebcda/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.913623 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37d52190-a61c-44fb-9c9c-7966bd00e2c8" path="/var/lib/kubelet/pods/37d52190-a61c-44fb-9c9c-7966bd00e2c8/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.915095 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc" path="/var/lib/kubelet/pods/3ccc5bea-5150-4613-ba7c-aa3e8bbef4bc/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.915870 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55bd51a9-df41-4ab4-be2b-43dd4d776bf5" path="/var/lib/kubelet/pods/55bd51a9-df41-4ab4-be2b-43dd4d776bf5/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.916483 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56eb624a-00e8-476d-b468-aa83bc64faad" path="/var/lib/kubelet/pods/56eb624a-00e8-476d-b468-aa83bc64faad/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.917885 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" path="/var/lib/kubelet/pods/58a117e9-40a2-43bc-b52b-6bbfdd0f45dd/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.918560 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c7c983d-9ff5-40ac-a5a7-4945f350afb3" path="/var/lib/kubelet/pods/5c7c983d-9ff5-40ac-a5a7-4945f350afb3/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.919156 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="92c26092-3d97-417f-aaa7-48723d6c88be" path="/var/lib/kubelet/pods/92c26092-3d97-417f-aaa7-48723d6c88be/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.920354 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9651251a-a0b2-4db8-bb82-b22a707bd7ab" path="/var/lib/kubelet/pods/9651251a-a0b2-4db8-bb82-b22a707bd7ab/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.921202 5010 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" path="/var/lib/kubelet/pods/9687c9f4-9131-4c43-a1f2-2faf3040e499/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.922473 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" path="/var/lib/kubelet/pods/c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.923461 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d16011c3-075a-4cff-a221-16ed50067a9e" path="/var/lib/kubelet/pods/d16011c3-075a-4cff-a221-16ed50067a9e/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.924219 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e618fcce-218b-4f09-a0ae-5cad873d9aab" path="/var/lib/kubelet/pods/e618fcce-218b-4f09-a0ae-5cad873d9aab/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.924875 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" path="/var/lib/kubelet/pods/ed209eb8-b2b9-4101-9eda-2762259ea2cd/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.926042 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" path="/var/lib/kubelet/pods/fe931cd2-6e31-4e82-a617-f028019a60c4/volumes" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.930688 5010 scope.go:117] "RemoveContainer" containerID="7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.931199 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822\": container with ID starting with 7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822 not found: ID does not exist" containerID="7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.931242 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822"} err="failed to get container status \"7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822\": rpc error: code = NotFound desc = could not find container \"7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822\": container with ID starting with 7d08a6608361c6e396ac6eb3dc2ea755308694ee490615f58d23907780fb3822 not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.931272 5010 scope.go:117] "RemoveContainer" containerID="7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5" Nov 26 15:52:51 crc kubenswrapper[5010]: E1126 15:52:51.931529 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5\": container with ID starting with 7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5 not found: ID does not exist" containerID="7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.931549 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5"} err="failed to get container status 
\"7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5\": rpc error: code = NotFound desc = could not find container \"7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5\": container with ID starting with 7ea47b7427a2a6557c131f4346de488d90a2de4656ddda69f8834e31f5f12cc5 not found: ID does not exist" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.931563 5010 scope.go:117] "RemoveContainer" containerID="118a80403c8effe28594f56bbbae9975efb6bb4ecc9f75c9df702170fd76f085" Nov 26 15:52:51 crc kubenswrapper[5010]: I1126 15:52:51.952770 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.106:5671: connect: connection refused" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.029999 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.035305 5010 scope.go:117] "RemoveContainer" containerID="9defbd037a4a2f05eca15526ffb9c48bad32cd70369ffd0dc805ef3172852686" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.061852 5010 scope.go:117] "RemoveContainer" containerID="256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.145359 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.163064 5010 scope.go:117] "RemoveContainer" containerID="91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.174612 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e02370f-1b63-47f7-8d66-ba7c94310c38-operator-scripts\") pod \"7e02370f-1b63-47f7-8d66-ba7c94310c38\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.174903 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjppj\" (UniqueName: \"kubernetes.io/projected/7e02370f-1b63-47f7-8d66-ba7c94310c38-kube-api-access-fjppj\") pod \"7e02370f-1b63-47f7-8d66-ba7c94310c38\" (UID: \"7e02370f-1b63-47f7-8d66-ba7c94310c38\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.175360 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e02370f-1b63-47f7-8d66-ba7c94310c38-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "7e02370f-1b63-47f7-8d66-ba7c94310c38" (UID: "7e02370f-1b63-47f7-8d66-ba7c94310c38"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.183965 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.186766 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e02370f-1b63-47f7-8d66-ba7c94310c38-kube-api-access-fjppj" (OuterVolumeSpecName: "kube-api-access-fjppj") pod "7e02370f-1b63-47f7-8d66-ba7c94310c38" (UID: "7e02370f-1b63-47f7-8d66-ba7c94310c38"). InnerVolumeSpecName "kube-api-access-fjppj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.186922 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.210050 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.217399 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.249020 5010 scope.go:117] "RemoveContainer" containerID="256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.249695 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e\": container with ID starting with 256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e not found: ID does not exist" containerID="256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.249760 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e"} err="failed to get container status \"256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e\": rpc error: code = NotFound desc = could not find container \"256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e\": container with ID starting with 256f9c885d7596d52ee66947f9c128b4739ed28fc18c9928ae98437a3a23225e not found: ID does not exist" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.249789 5010 scope.go:117] "RemoveContainer" containerID="91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.250239 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e\": container with ID starting with 91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e not found: ID does not exist" containerID="91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.250260 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e"} err="failed to get container status \"91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e\": rpc error: code = NotFound desc = could not find container \"91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e\": container with ID starting with 91aece668b447d0467f9c71b27176aed4904be43ecc5dc12fbc86b48272b126e not found: ID does not exist" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276207 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxt9m\" (UniqueName: \"kubernetes.io/projected/37e7e487-28ea-405b-a645-a85aa94e12d2-kube-api-access-dxt9m\") pod \"37e7e487-28ea-405b-a645-a85aa94e12d2\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276323 5010 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6x6m\" (UniqueName: \"kubernetes.io/projected/35439472-3a5f-450f-9fcc-2a739253ad5b-kube-api-access-w6x6m\") pod \"35439472-3a5f-450f-9fcc-2a739253ad5b\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276373 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35439472-3a5f-450f-9fcc-2a739253ad5b-operator-scripts\") pod \"35439472-3a5f-450f-9fcc-2a739253ad5b\" (UID: \"35439472-3a5f-450f-9fcc-2a739253ad5b\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276392 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e8dfd8a-0624-4f78-8c35-c6710328de9d-operator-scripts\") pod \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276565 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xttv8\" (UniqueName: \"kubernetes.io/projected/2e8dfd8a-0624-4f78-8c35-c6710328de9d-kube-api-access-xttv8\") pod \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\" (UID: \"2e8dfd8a-0624-4f78-8c35-c6710328de9d\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276624 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e7e487-28ea-405b-a645-a85aa94e12d2-operator-scripts\") pod \"37e7e487-28ea-405b-a645-a85aa94e12d2\" (UID: \"37e7e487-28ea-405b-a645-a85aa94e12d2\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.276886 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35439472-3a5f-450f-9fcc-2a739253ad5b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "35439472-3a5f-450f-9fcc-2a739253ad5b" (UID: "35439472-3a5f-450f-9fcc-2a739253ad5b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.277007 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2e8dfd8a-0624-4f78-8c35-c6710328de9d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2e8dfd8a-0624-4f78-8c35-c6710328de9d" (UID: "2e8dfd8a-0624-4f78-8c35-c6710328de9d"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.277319 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjppj\" (UniqueName: \"kubernetes.io/projected/7e02370f-1b63-47f7-8d66-ba7c94310c38-kube-api-access-fjppj\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.277336 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/35439472-3a5f-450f-9fcc-2a739253ad5b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.277345 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2e8dfd8a-0624-4f78-8c35-c6710328de9d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.277377 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/7e02370f-1b63-47f7-8d66-ba7c94310c38-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.277496 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37e7e487-28ea-405b-a645-a85aa94e12d2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37e7e487-28ea-405b-a645-a85aa94e12d2" (UID: "37e7e487-28ea-405b-a645-a85aa94e12d2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.280123 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e8dfd8a-0624-4f78-8c35-c6710328de9d-kube-api-access-xttv8" (OuterVolumeSpecName: "kube-api-access-xttv8") pod "2e8dfd8a-0624-4f78-8c35-c6710328de9d" (UID: "2e8dfd8a-0624-4f78-8c35-c6710328de9d"). InnerVolumeSpecName "kube-api-access-xttv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.280553 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37e7e487-28ea-405b-a645-a85aa94e12d2-kube-api-access-dxt9m" (OuterVolumeSpecName: "kube-api-access-dxt9m") pod "37e7e487-28ea-405b-a645-a85aa94e12d2" (UID: "37e7e487-28ea-405b-a645-a85aa94e12d2"). InnerVolumeSpecName "kube-api-access-dxt9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.281082 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35439472-3a5f-450f-9fcc-2a739253ad5b-kube-api-access-w6x6m" (OuterVolumeSpecName: "kube-api-access-w6x6m") pod "35439472-3a5f-450f-9fcc-2a739253ad5b" (UID: "35439472-3a5f-450f-9fcc-2a739253ad5b"). InnerVolumeSpecName "kube-api-access-w6x6m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.378519 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nm9vx\" (UniqueName: \"kubernetes.io/projected/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-kube-api-access-nm9vx\") pod \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.378615 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-operator-scripts\") pod \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.378691 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgq4l\" (UniqueName: \"kubernetes.io/projected/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-kube-api-access-bgq4l\") pod \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\" (UID: \"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.378748 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-operator-scripts\") pod \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\" (UID: \"0a45c0f6-649b-4b48-8245-4f70da1c3a4f\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379044 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379130 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2drnz\" (UniqueName: \"kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz\") pod \"keystoneebd8-account-delete-2ghcn\" (UID: \"4941e4a7-7638-4a30-91b2-73b25ead50c2\") " pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379202 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" (UID: "ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379294 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxt9m\" (UniqueName: \"kubernetes.io/projected/37e7e487-28ea-405b-a645-a85aa94e12d2-kube-api-access-dxt9m\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379309 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6x6m\" (UniqueName: \"kubernetes.io/projected/35439472-3a5f-450f-9fcc-2a739253ad5b-kube-api-access-w6x6m\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379318 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xttv8\" (UniqueName: \"kubernetes.io/projected/2e8dfd8a-0624-4f78-8c35-c6710328de9d-kube-api-access-xttv8\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.379326 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37e7e487-28ea-405b-a645-a85aa94e12d2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.379567 5010 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.379684 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts podName:4941e4a7-7638-4a30-91b2-73b25ead50c2 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:54.379645066 +0000 UTC m=+1595.170362294 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts") pod "keystoneebd8-account-delete-2ghcn" (UID: "4941e4a7-7638-4a30-91b2-73b25ead50c2") : configmap "openstack-scripts" not found Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.380357 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0a45c0f6-649b-4b48-8245-4f70da1c3a4f" (UID: "0a45c0f6-649b-4b48-8245-4f70da1c3a4f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.383297 5010 projected.go:194] Error preparing data for projected volume kube-api-access-2drnz for pod openstack/keystoneebd8-account-delete-2ghcn: failed to fetch token: serviceaccounts "galera-openstack" not found Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.383512 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz podName:4941e4a7-7638-4a30-91b2-73b25ead50c2 nodeName:}" failed. No retries permitted until 2025-11-26 15:52:54.383485322 +0000 UTC m=+1595.174202540 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-2drnz" (UniqueName: "kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz") pod "keystoneebd8-account-delete-2ghcn" (UID: "4941e4a7-7638-4a30-91b2-73b25ead50c2") : failed to fetch token: serviceaccounts "galera-openstack" not found Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.398145 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-kube-api-access-bgq4l" (OuterVolumeSpecName: "kube-api-access-bgq4l") pod "ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" (UID: "ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5"). InnerVolumeSpecName "kube-api-access-bgq4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.414096 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-kube-api-access-nm9vx" (OuterVolumeSpecName: "kube-api-access-nm9vx") pod "0a45c0f6-649b-4b48-8245-4f70da1c3a4f" (UID: "0a45c0f6-649b-4b48-8245-4f70da1c3a4f"). InnerVolumeSpecName "kube-api-access-nm9vx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.487878 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nm9vx\" (UniqueName: \"kubernetes.io/projected/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-kube-api-access-nm9vx\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.487909 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.487919 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgq4l\" (UniqueName: \"kubernetes.io/projected/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5-kube-api-access-bgq4l\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.487928 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0a45c0f6-649b-4b48-8245-4f70da1c3a4f-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.502926 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.505130 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.506391 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 15:52:52 crc 
kubenswrapper[5010]: E1126 15:52:52.506429 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerName="nova-cell0-conductor-conductor" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.526940 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.539654 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.541298 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.545583 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" cmd=["/bin/bash","/var/lib/operator-scripts/mysql_probe.sh","readiness"] Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.545644 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="galera" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.559283 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novacell0e911-account-delete-8hd9j" event={"ID":"35439472-3a5f-450f-9fcc-2a739253ad5b","Type":"ContainerDied","Data":"8202821a7dfc286f3e5c7c43dbfc8afb10f37817294030b59909cdc1809690c9"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.559537 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8202821a7dfc286f3e5c7c43dbfc8afb10f37817294030b59909cdc1809690c9" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.559636 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/novacell0e911-account-delete-8hd9j" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.562671 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/novaapif128-account-delete-msqb5" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.562672 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/novaapif128-account-delete-msqb5" event={"ID":"37e7e487-28ea-405b-a645-a85aa94e12d2","Type":"ContainerDied","Data":"a9408ad09b4d613260f3e8fc0bb6d6a096129bbf38072da73b83eb2e4f57c5e2"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.562737 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9408ad09b4d613260f3e8fc0bb6d6a096129bbf38072da73b83eb2e4f57c5e2" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.577531 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron4616-account-delete-ktdvj" event={"ID":"ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5","Type":"ContainerDied","Data":"069ff544aa7da159d181cbe236cc40ec44ce54436f44326fbc2f3504dd7cd0e0"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.577594 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="069ff544aa7da159d181cbe236cc40ec44ce54436f44326fbc2f3504dd7cd0e0" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.577674 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron4616-account-delete-ktdvj" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.584860 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glancec7b0-account-delete-9tpdl" event={"ID":"2e8dfd8a-0624-4f78-8c35-c6710328de9d","Type":"ContainerDied","Data":"df87b2b96caa04e818d12cf1fc451399f0f69142a4865be35d9783c67ee916a0"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.584909 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df87b2b96caa04e818d12cf1fc451399f0f69142a4865be35d9783c67ee916a0" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.584997 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glancec7b0-account-delete-9tpdl" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.613677 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican920e-account-delete-bv9zw" event={"ID":"0a45c0f6-649b-4b48-8245-4f70da1c3a4f","Type":"ContainerDied","Data":"3872257d041b635dcd61201eef6e22c1fe1acc84114d7490bc52b8ea76b30947"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.614191 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3872257d041b635dcd61201eef6e22c1fe1acc84114d7490bc52b8ea76b30947" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.613702 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican920e-account-delete-bv9zw" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.616437 5010 generic.go:334] "Generic (PLEG): container finished" podID="6243a3e1-835d-4150-afea-1f2bb0032065" containerID="592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f" exitCode=0 Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.616554 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.616838 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6243a3e1-835d-4150-afea-1f2bb0032065","Type":"ContainerDied","Data":"592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.616872 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6243a3e1-835d-4150-afea-1f2bb0032065","Type":"ContainerDied","Data":"62b8d5ed244e315b9c8dbb554f638dc2740e655df728683a208d538650ccbdde"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.616893 5010 scope.go:117] "RemoveContainer" containerID="592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.626644 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerID="d3ae680aa34c0a6c9f874b61e0efe2655d40cee16f8635aa026abbab0b4ef8b8" exitCode=0 Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.626672 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25","Type":"ContainerDied","Data":"d3ae680aa34c0a6c9f874b61e0efe2655d40cee16f8635aa026abbab0b4ef8b8"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.631209 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder3420-account-delete-8w2px" event={"ID":"7e02370f-1b63-47f7-8d66-ba7c94310c38","Type":"ContainerDied","Data":"a8a98307122d033d08e0bbee5ea734e989366262a2b5c3dc07d80c012b48e1fc"} Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.631242 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a8a98307122d033d08e0bbee5ea734e989366262a2b5c3dc07d80c012b48e1fc" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.631282 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder3420-account-delete-8w2px" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.632821 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystoneebd8-account-delete-2ghcn" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.684916 5010 scope.go:117] "RemoveContainer" containerID="592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f" Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.686067 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f\": container with ID starting with 592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f not found: ID does not exist" containerID="592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.686117 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f"} err="failed to get container status \"592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f\": rpc error: code = NotFound desc = could not find container \"592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f\": container with ID starting with 592c42da682d945e096f00bd4dc550ee1b5d51fb1cc754c2dd9d8fe23068624f not found: ID does not exist" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.699166 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-memcached-tls-certs\") pod \"6243a3e1-835d-4150-afea-1f2bb0032065\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.699396 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-combined-ca-bundle\") pod \"6243a3e1-835d-4150-afea-1f2bb0032065\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.699440 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7qws2\" (UniqueName: \"kubernetes.io/projected/6243a3e1-835d-4150-afea-1f2bb0032065-kube-api-access-7qws2\") pod \"6243a3e1-835d-4150-afea-1f2bb0032065\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.699503 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-kolla-config\") pod \"6243a3e1-835d-4150-afea-1f2bb0032065\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.699538 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-config-data\") pod \"6243a3e1-835d-4150-afea-1f2bb0032065\" (UID: \"6243a3e1-835d-4150-afea-1f2bb0032065\") " Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.700035 5010 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Nov 26 15:52:52 crc kubenswrapper[5010]: E1126 15:52:52.700169 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data podName:9940cbe6-c323-4320-9e45-463e5c023156 nodeName:}" failed. 
No retries permitted until 2025-11-26 15:53:00.700148283 +0000 UTC m=+1601.490865431 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data") pod "rabbitmq-server-0" (UID: "9940cbe6-c323-4320-9e45-463e5c023156") : configmap "rabbitmq-config-data" not found Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.704483 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "6243a3e1-835d-4150-afea-1f2bb0032065" (UID: "6243a3e1-835d-4150-afea-1f2bb0032065"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.705424 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-config-data" (OuterVolumeSpecName: "config-data") pod "6243a3e1-835d-4150-afea-1f2bb0032065" (UID: "6243a3e1-835d-4150-afea-1f2bb0032065"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.718083 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6243a3e1-835d-4150-afea-1f2bb0032065-kube-api-access-7qws2" (OuterVolumeSpecName: "kube-api-access-7qws2") pod "6243a3e1-835d-4150-afea-1f2bb0032065" (UID: "6243a3e1-835d-4150-afea-1f2bb0032065"). InnerVolumeSpecName "kube-api-access-7qws2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.725848 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystoneebd8-account-delete-2ghcn"] Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.731997 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystoneebd8-account-delete-2ghcn"] Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.755294 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6243a3e1-835d-4150-afea-1f2bb0032065" (UID: "6243a3e1-835d-4150-afea-1f2bb0032065"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.801109 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "6243a3e1-835d-4150-afea-1f2bb0032065" (UID: "6243a3e1-835d-4150-afea-1f2bb0032065"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.804535 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.804563 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7qws2\" (UniqueName: \"kubernetes.io/projected/6243a3e1-835d-4150-afea-1f2bb0032065-kube-api-access-7qws2\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.804576 5010 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.804586 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6243a3e1-835d-4150-afea-1f2bb0032065-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.804597 5010 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6243a3e1-835d-4150-afea-1f2bb0032065-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.835610 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-5c9c764c5c-5p8zc" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.169:9696/\": dial tcp 10.217.0.169:9696: connect: connection refused" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.907128 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4941e4a7-7638-4a30-91b2-73b25ead50c2-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.907502 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2drnz\" (UniqueName: \"kubernetes.io/projected/4941e4a7-7638-4a30-91b2-73b25ead50c2-kube-api-access-2drnz\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.933888 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.951158 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Nov 26 15:52:52 crc kubenswrapper[5010]: I1126 15:52:52.957343 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.095494 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.097079 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.098483 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.098544 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerName="nova-scheduler-scheduler" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112082 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-plugins-conf\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112127 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112162 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-tls\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112197 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-erlang-cookie-secret\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112265 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-pod-info\") pod 
\"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112340 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112365 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-erlang-cookie\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112468 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4hmk\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-kube-api-access-w4hmk\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112495 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-server-conf\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112557 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-plugins\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112579 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.112599 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-confd\") pod \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\" (UID: \"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.113006 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.113295 5010 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.113320 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.113979 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.116634 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.117192 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.118128 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.118158 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-pod-info" (OuterVolumeSpecName: "pod-info") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.118937 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-kube-api-access-w4hmk" (OuterVolumeSpecName: "kube-api-access-w4hmk") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "kube-api-access-w4hmk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.131492 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data" (OuterVolumeSpecName: "config-data") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.151054 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-server-conf" (OuterVolumeSpecName: "server-conf") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.188467 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218905 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218945 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218958 5010 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218969 5010 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218978 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218987 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4hmk\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-kube-api-access-w4hmk\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.218996 5010 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.219005 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.240361 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" (UID: 
"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.242042 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320119 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-erlang-cookie\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320201 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-server-conf\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320241 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9940cbe6-c323-4320-9e45-463e5c023156-pod-info\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320264 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvpxs\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-kube-api-access-cvpxs\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320296 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320334 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-confd\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320515 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9940cbe6-c323-4320-9e45-463e5c023156-erlang-cookie-secret\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-plugins-conf\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320647 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-tls\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc 
kubenswrapper[5010]: I1126 15:52:53.320664 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-plugins\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320694 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data\") pod \"9940cbe6-c323-4320-9e45-463e5c023156\" (UID: \"9940cbe6-c323-4320-9e45-463e5c023156\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.321013 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.321025 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.321149 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.320957 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.321876 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.325732 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9940cbe6-c323-4320-9e45-463e5c023156-pod-info" (OuterVolumeSpecName: "pod-info") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.326339 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "local-storage09-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.326565 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9940cbe6-c323-4320-9e45-463e5c023156-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.327646 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-kube-api-access-cvpxs" (OuterVolumeSpecName: "kube-api-access-cvpxs") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "kube-api-access-cvpxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.330582 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.376637 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-server-conf" (OuterVolumeSpecName: "server-conf") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.389283 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data" (OuterVolumeSpecName: "config-data") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424876 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424908 5010 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424917 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424926 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424937 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424947 5010 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9940cbe6-c323-4320-9e45-463e5c023156-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424956 5010 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9940cbe6-c323-4320-9e45-463e5c023156-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424965 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvpxs\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-kube-api-access-cvpxs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.424996 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.425005 5010 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9940cbe6-c323-4320-9e45-463e5c023156-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.425956 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9940cbe6-c323-4320-9e45-463e5c023156" (UID: "9940cbe6-c323-4320-9e45-463e5c023156"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.442919 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.527017 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.527061 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9940cbe6-c323-4320-9e45-463e5c023156-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.554010 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.639945 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-combined-ca-bundle\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.639998 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.640024 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pljx7\" (UniqueName: \"kubernetes.io/projected/99fb2212-9383-48c9-b976-1e93a19c3ce1-kube-api-access-pljx7\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.640070 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-default\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.640109 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-generated\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.640127 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-galera-tls-certs\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.640157 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-kolla-config\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.640178 5010 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-operator-scripts\") pod \"99fb2212-9383-48c9-b976-1e93a19c3ce1\" (UID: \"99fb2212-9383-48c9-b976-1e93a19c3ce1\") " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.641089 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.641774 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.642421 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.648444 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.656944 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99fb2212-9383-48c9-b976-1e93a19c3ce1-kube-api-access-pljx7" (OuterVolumeSpecName: "kube-api-access-pljx7") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "kube-api-access-pljx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.664235 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.667636 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25","Type":"ContainerDied","Data":"1bc145d0dfde452d66b5f3fca7cfb50b88e28dfb6407f0282c78f91505761933"} Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.667755 5010 scope.go:117] "RemoveContainer" containerID="d3ae680aa34c0a6c9f874b61e0efe2655d40cee16f8635aa026abbab0b4ef8b8" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.667811 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.673851 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.676751 5010 generic.go:334] "Generic (PLEG): container finished" podID="9940cbe6-c323-4320-9e45-463e5c023156" containerID="e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6" exitCode=0 Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.677162 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9940cbe6-c323-4320-9e45-463e5c023156","Type":"ContainerDied","Data":"e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6"} Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.677354 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9940cbe6-c323-4320-9e45-463e5c023156","Type":"ContainerDied","Data":"396c4191d4a6d39950260965f0734390f17733ce7451b70b55426d555c087132"} Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.677209 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.679685 5010 generic.go:334] "Generic (PLEG): container finished" podID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" exitCode=0 Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.679729 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"99fb2212-9383-48c9-b976-1e93a19c3ce1","Type":"ContainerDied","Data":"8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08"} Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.679776 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"99fb2212-9383-48c9-b976-1e93a19c3ce1","Type":"ContainerDied","Data":"e5fce497303a18e6cda621e977799dfe711f0b769848eddaade6f7e9e44dd246"} Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.679839 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.716282 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "99fb2212-9383-48c9-b976-1e93a19c3ce1" (UID: "99fb2212-9383-48c9-b976-1e93a19c3ce1"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.729669 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.737290 5010 scope.go:117] "RemoveContainer" containerID="ab6cade5267022ce5c3a9112e0b1e51b93929e7dcbe177fc49bab18f72aaf1a2" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.738863 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741201 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741240 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pljx7\" (UniqueName: \"kubernetes.io/projected/99fb2212-9383-48c9-b976-1e93a19c3ce1-kube-api-access-pljx7\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741253 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-default\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741263 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/99fb2212-9383-48c9-b976-1e93a19c3ce1-config-data-generated\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741272 5010 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741279 5010 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-kolla-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741287 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/99fb2212-9383-48c9-b976-1e93a19c3ce1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.741296 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fb2212-9383-48c9-b976-1e93a19c3ce1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.751218 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.758378 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.765273 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.777589 5010 scope.go:117] "RemoveContainer" containerID="e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.813265 5010 scope.go:117] "RemoveContainer" 
containerID="fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.840486 5010 scope.go:117] "RemoveContainer" containerID="e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6" Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.841073 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6\": container with ID starting with e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6 not found: ID does not exist" containerID="e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.841111 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6"} err="failed to get container status \"e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6\": rpc error: code = NotFound desc = could not find container \"e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6\": container with ID starting with e34227f52f5e0684d9111f992740f132cf3f62d3e29b7f171ec28f15fe087dc6 not found: ID does not exist" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.841156 5010 scope.go:117] "RemoveContainer" containerID="fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd" Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.841614 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd\": container with ID starting with fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd not found: ID does not exist" containerID="fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.841644 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd"} err="failed to get container status \"fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd\": rpc error: code = NotFound desc = could not find container \"fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd\": container with ID starting with fa0948bb1e406827c71bbec186cbf67a5ac430e60ae1aff2b1dc61675a1db9bd not found: ID does not exist" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.841662 5010 scope.go:117] "RemoveContainer" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.843115 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.874607 5010 scope.go:117] "RemoveContainer" containerID="a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.920938 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4941e4a7-7638-4a30-91b2-73b25ead50c2" path="/var/lib/kubelet/pods/4941e4a7-7638-4a30-91b2-73b25ead50c2/volumes" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.921278 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="6243a3e1-835d-4150-afea-1f2bb0032065" path="/var/lib/kubelet/pods/6243a3e1-835d-4150-afea-1f2bb0032065/volumes" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.922087 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9940cbe6-c323-4320-9e45-463e5c023156" path="/var/lib/kubelet/pods/9940cbe6-c323-4320-9e45-463e5c023156/volumes" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.923286 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" path="/var/lib/kubelet/pods/a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25/volumes" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.924005 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-86krh"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.924031 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-86krh"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.924776 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement0cf4-account-delete-xrw9x"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.929535 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-0cf4-account-create-update-8zr8q"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.934942 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement0cf4-account-delete-xrw9x"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.939740 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-0cf4-account-create-update-8zr8q"] Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.948318 5010 scope.go:117] "RemoveContainer" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.948826 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08\": container with ID starting with 8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08 not found: ID does not exist" containerID="8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.948863 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08"} err="failed to get container status \"8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08\": rpc error: code = NotFound desc = could not find container \"8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08\": container with ID starting with 8025444c64540a705e347bf4069547e356eae21f68931fe19c205fd294109e08 not found: ID does not exist" Nov 26 15:52:53 crc kubenswrapper[5010]: I1126 15:52:53.948892 5010 scope.go:117] "RemoveContainer" containerID="a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58" Nov 26 15:52:53 crc kubenswrapper[5010]: E1126 15:52:53.949269 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58\": container with ID starting with a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58 not found: ID does not exist" containerID="a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58" Nov 26 15:52:53 crc 
kubenswrapper[5010]: I1126 15:52:53.949306 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58"} err="failed to get container status \"a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58\": rpc error: code = NotFound desc = could not find container \"a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58\": container with ID starting with a8ffe627c729622fad4e0c659781e4ba47a9797a27b651e2a5b8d5c8111cff58 not found: ID does not exist" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.023962 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.078517 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.183050 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-rnnz9"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.192253 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-rnnz9"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.207302 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-3420-account-create-update-xs8x7"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.213433 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-3420-account-create-update-xs8x7"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.219390 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder3420-account-delete-8w2px"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.225241 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder3420-account-delete-8w2px"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.229554 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279469 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6p6p2\" (UniqueName: \"kubernetes.io/projected/d6093731-a529-4e5b-94bd-4948ab30cedc-kube-api-access-6p6p2\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279543 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-combined-ca-bundle\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279570 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-fernet-keys\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279588 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-internal-tls-certs\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279671 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-credential-keys\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279701 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-scripts\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279735 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-public-tls-certs\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.279798 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-config-data\") pod \"d6093731-a529-4e5b-94bd-4948ab30cedc\" (UID: \"d6093731-a529-4e5b-94bd-4948ab30cedc\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.284623 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-scripts" (OuterVolumeSpecName: "scripts") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.289465 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-bknm6"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.307954 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.313160 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6093731-a529-4e5b-94bd-4948ab30cedc-kube-api-access-6p6p2" (OuterVolumeSpecName: "kube-api-access-6p6p2") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "kube-api-access-6p6p2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.316112 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.318831 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-bknm6"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.325473 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-920e-account-create-update-zgc6z"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.333257 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican920e-account-delete-bv9zw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.340607 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-920e-account-create-update-zgc6z"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.341697 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.348825 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican920e-account-delete-bv9zw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.354279 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-config-data" (OuterVolumeSpecName: "config-data") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.381920 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.381969 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6p6p2\" (UniqueName: \"kubernetes.io/projected/d6093731-a529-4e5b-94bd-4948ab30cedc-kube-api-access-6p6p2\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.381982 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.381991 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.381999 5010 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.382008 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.392885 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-99648"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.400477 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.408899 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-99648"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.417846 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-c7b0-account-create-update-z6mg5"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.421696 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glancec7b0-account-delete-9tpdl"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.432238 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d6093731-a529-4e5b-94bd-4948ab30cedc" (UID: "d6093731-a529-4e5b-94bd-4948ab30cedc"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.433222 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-c7b0-account-create-update-z6mg5"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.447557 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glancec7b0-account-delete-9tpdl"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.486188 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.486217 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d6093731-a529-4e5b-94bd-4948ab30cedc-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.533470 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-q5wnb"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.545857 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-q5wnb"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.554008 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f128-account-create-update-xklvh"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.554330 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.564016 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f128-account-create-update-xklvh"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.572754 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novaapif128-account-delete-msqb5"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.583869 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novaapif128-account-delete-msqb5"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.587323 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-config-data\") pod \"1cfc9265-de84-4047-9e01-69444aa4d9f5\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.587402 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5xcs\" (UniqueName: \"kubernetes.io/projected/1cfc9265-de84-4047-9e01-69444aa4d9f5-kube-api-access-d5xcs\") pod \"1cfc9265-de84-4047-9e01-69444aa4d9f5\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.587471 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-combined-ca-bundle\") pod \"1cfc9265-de84-4047-9e01-69444aa4d9f5\" (UID: \"1cfc9265-de84-4047-9e01-69444aa4d9f5\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.590471 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cfc9265-de84-4047-9e01-69444aa4d9f5-kube-api-access-d5xcs" (OuterVolumeSpecName: "kube-api-access-d5xcs") pod "1cfc9265-de84-4047-9e01-69444aa4d9f5" (UID: 
"1cfc9265-de84-4047-9e01-69444aa4d9f5"). InnerVolumeSpecName "kube-api-access-d5xcs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.617862 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-config-data" (OuterVolumeSpecName: "config-data") pod "1cfc9265-de84-4047-9e01-69444aa4d9f5" (UID: "1cfc9265-de84-4047-9e01-69444aa4d9f5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.621917 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1cfc9265-de84-4047-9e01-69444aa4d9f5" (UID: "1cfc9265-de84-4047-9e01-69444aa4d9f5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.653807 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-fj5jk"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.667630 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-fj5jk"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.675307 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/novacell0e911-account-delete-8hd9j"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.683344 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/novacell0e911-account-delete-8hd9j"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.688921 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.688943 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cfc9265-de84-4047-9e01-69444aa4d9f5-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.688954 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5xcs\" (UniqueName: \"kubernetes.io/projected/1cfc9265-de84-4047-9e01-69444aa4d9f5-kube-api-access-d5xcs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.690308 5010 generic.go:334] "Generic (PLEG): container finished" podID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" exitCode=0 Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.690381 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a9fc9e37-6c7d-45d8-81e2-c6a175467c12","Type":"ContainerDied","Data":"a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56"} Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.690789 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-e911-account-create-update-x6wmv"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.694224 5010 generic.go:334] "Generic (PLEG): container finished" podID="1cfc9265-de84-4047-9e01-69444aa4d9f5" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" exitCode=0 Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.694258 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"1cfc9265-de84-4047-9e01-69444aa4d9f5","Type":"ContainerDied","Data":"f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930"} Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.694288 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"1cfc9265-de84-4047-9e01-69444aa4d9f5","Type":"ContainerDied","Data":"109c6fcb3b5f82d3cbd78e2c85049a846c7cede4f147afff2cc9dcff3a5ef005"} Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.694306 5010 scope.go:117] "RemoveContainer" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.694323 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.702732 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-e911-account-create-update-x6wmv"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.704220 5010 generic.go:334] "Generic (PLEG): container finished" podID="d6093731-a529-4e5b-94bd-4948ab30cedc" containerID="b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d" exitCode=0 Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.704276 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7b9b5b699d-rh4fw" event={"ID":"d6093731-a529-4e5b-94bd-4948ab30cedc","Type":"ContainerDied","Data":"b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d"} Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.704299 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-7b9b5b699d-rh4fw" event={"ID":"d6093731-a529-4e5b-94bd-4948ab30cedc","Type":"ContainerDied","Data":"ebfc8ca5daa68fe70d7f0bc633588997ab46efd2051f186e81f4453b1902d1e9"} Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.704359 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-7b9b5b699d-rh4fw" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.713669 5010 generic.go:334] "Generic (PLEG): container finished" podID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" exitCode=0 Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.713702 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3c00abcf-4e27-48ae-be52-a92cbd24957c","Type":"ContainerDied","Data":"b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe"} Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.728986 5010 scope.go:117] "RemoveContainer" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.729094 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 15:52:54 crc kubenswrapper[5010]: E1126 15:52:54.731724 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930\": container with ID starting with f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930 not found: ID does not exist" containerID="f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.731825 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930"} err="failed to get container status \"f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930\": rpc error: code = NotFound desc = could not find container \"f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930\": container with ID starting with f375e3f9c393665a0a1bf6749fee592b16c08d947bfb12624dd2341032abc930 not found: ID does not exist" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.731882 5010 scope.go:117] "RemoveContainer" containerID="b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.741846 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.753077 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-7b9b5b699d-rh4fw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.760001 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-7b9b5b699d-rh4fw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.761279 5010 scope.go:117] "RemoveContainer" containerID="b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d" Nov 26 15:52:54 crc kubenswrapper[5010]: E1126 15:52:54.761778 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d\": container with ID starting with b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d not found: ID does not exist" containerID="b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.761813 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d"} err="failed to get container status \"b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d\": rpc error: code = NotFound desc = could not find container \"b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d\": container with ID starting with b98d36cdef31f7380d31fb7d8c0283bd633d022d715b23df5b5b64646facfc7d not found: ID does not exist" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.822460 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-qgvnw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.827764 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-qgvnw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.844764 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron4616-account-delete-ktdvj"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.860930 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4616-account-create-update-tsdsw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.869770 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4616-account-create-update-tsdsw"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.873329 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron4616-account-delete-ktdvj"] Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.933883 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.943019 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.994475 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-config-data\") pod \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.994545 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-config-data\") pod \"3c00abcf-4e27-48ae-be52-a92cbd24957c\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.994566 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-combined-ca-bundle\") pod \"3c00abcf-4e27-48ae-be52-a92cbd24957c\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.994642 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s5jbw\" (UniqueName: \"kubernetes.io/projected/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-kube-api-access-s5jbw\") pod \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.994776 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xg9v7\" (UniqueName: \"kubernetes.io/projected/3c00abcf-4e27-48ae-be52-a92cbd24957c-kube-api-access-xg9v7\") pod 
\"3c00abcf-4e27-48ae-be52-a92cbd24957c\" (UID: \"3c00abcf-4e27-48ae-be52-a92cbd24957c\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.994806 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-combined-ca-bundle\") pod \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\" (UID: \"a9fc9e37-6c7d-45d8-81e2-c6a175467c12\") " Nov 26 15:52:54 crc kubenswrapper[5010]: I1126 15:52:54.999678 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c00abcf-4e27-48ae-be52-a92cbd24957c-kube-api-access-xg9v7" (OuterVolumeSpecName: "kube-api-access-xg9v7") pod "3c00abcf-4e27-48ae-be52-a92cbd24957c" (UID: "3c00abcf-4e27-48ae-be52-a92cbd24957c"). InnerVolumeSpecName "kube-api-access-xg9v7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.002398 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-kube-api-access-s5jbw" (OuterVolumeSpecName: "kube-api-access-s5jbw") pod "a9fc9e37-6c7d-45d8-81e2-c6a175467c12" (UID: "a9fc9e37-6c7d-45d8-81e2-c6a175467c12"). InnerVolumeSpecName "kube-api-access-s5jbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.019261 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-config-data" (OuterVolumeSpecName: "config-data") pod "3c00abcf-4e27-48ae-be52-a92cbd24957c" (UID: "3c00abcf-4e27-48ae-be52-a92cbd24957c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.021329 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9fc9e37-6c7d-45d8-81e2-c6a175467c12" (UID: "a9fc9e37-6c7d-45d8-81e2-c6a175467c12"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.021654 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3c00abcf-4e27-48ae-be52-a92cbd24957c" (UID: "3c00abcf-4e27-48ae-be52-a92cbd24957c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.024594 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-config-data" (OuterVolumeSpecName: "config-data") pod "a9fc9e37-6c7d-45d8-81e2-c6a175467c12" (UID: "a9fc9e37-6c7d-45d8-81e2-c6a175467c12"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.095936 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s5jbw\" (UniqueName: \"kubernetes.io/projected/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-kube-api-access-s5jbw\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.095972 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xg9v7\" (UniqueName: \"kubernetes.io/projected/3c00abcf-4e27-48ae-be52-a92cbd24957c-kube-api-access-xg9v7\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.095986 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.096000 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9fc9e37-6c7d-45d8-81e2-c6a175467c12-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.096012 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.096023 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3c00abcf-4e27-48ae-be52-a92cbd24957c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.375761 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.376830 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.376978 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.377473 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:52:55 crc kubenswrapper[5010]: 
E1126 15:52:55.377510 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.378640 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.380153 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.380190 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.427956 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.606079 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-sg-core-conf-yaml\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.606430 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fnl48\" (UniqueName: \"kubernetes.io/projected/c1c3c42e-0126-41e6-9536-d5096eb44680-kube-api-access-fnl48\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.606521 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-combined-ca-bundle\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.606640 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-log-httpd\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.606756 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-scripts\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc 
kubenswrapper[5010]: I1126 15:52:55.606881 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-run-httpd\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.606952 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-config-data\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.607041 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-ceilometer-tls-certs\") pod \"c1c3c42e-0126-41e6-9536-d5096eb44680\" (UID: \"c1c3c42e-0126-41e6-9536-d5096eb44680\") " Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.608652 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.608828 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.613685 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-scripts" (OuterVolumeSpecName: "scripts") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.613758 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1c3c42e-0126-41e6-9536-d5096eb44680-kube-api-access-fnl48" (OuterVolumeSpecName: "kube-api-access-fnl48") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "kube-api-access-fnl48". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.642968 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.668187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.702966 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.708933 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.709417 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.709436 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.709451 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c1c3c42e-0126-41e6-9536-d5096eb44680-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.709462 5010 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.709474 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.709486 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fnl48\" (UniqueName: \"kubernetes.io/projected/c1c3c42e-0126-41e6-9536-d5096eb44680-kube-api-access-fnl48\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.727996 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"3c00abcf-4e27-48ae-be52-a92cbd24957c","Type":"ContainerDied","Data":"56cbcb98173f0b8ca158f1fa0d577900ae08393612173b33da86f9854f0d3d87"} Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.728271 5010 scope.go:117] "RemoveContainer" containerID="b5e81edf7865ac2aa43e6d69fa9f8855257ce37a09fc8822060edaf50dd522fe" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.728148 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.732023 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a9fc9e37-6c7d-45d8-81e2-c6a175467c12","Type":"ContainerDied","Data":"f500719e4fc56901646d79e8a6b106901abfebb09904e54c377b40efd7267425"} Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.732223 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.740985 5010 generic.go:334] "Generic (PLEG): container finished" podID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerID="573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451" exitCode=0 Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.741039 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.741041 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerDied","Data":"573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451"} Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.741649 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c1c3c42e-0126-41e6-9536-d5096eb44680","Type":"ContainerDied","Data":"5f6d223dc8f9dbd9d35de346e7846dd03031ead382790502d2a173c3161b284e"} Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.752538 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-config-data" (OuterVolumeSpecName: "config-data") pod "c1c3c42e-0126-41e6-9536-d5096eb44680" (UID: "c1c3c42e-0126-41e6-9536-d5096eb44680"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.762338 5010 scope.go:117] "RemoveContainer" containerID="a662d8219e80fbd02728b7c30b775bd848970371cf8994c6722db891a694bf56" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.767822 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.778914 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.783147 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.798888 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.810004 5010 scope.go:117] "RemoveContainer" containerID="4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.811702 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c1c3c42e-0126-41e6-9536-d5096eb44680-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.843786 5010 scope.go:117] "RemoveContainer" containerID="f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.866084 5010 scope.go:117] "RemoveContainer" containerID="573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.882953 5010 scope.go:117] "RemoveContainer" containerID="6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.900406 5010 scope.go:117] "RemoveContainer" containerID="4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a" Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.900906 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a\": container with ID starting with 4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a not found: ID does not exist" containerID="4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.901024 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a"} err="failed to get container status \"4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a\": rpc error: code = NotFound desc = could not find container \"4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a\": container with ID starting with 4929ff3f63980c82ca160fc52df3ab0e2760e480b2c9e003563a91beaacad61a not found: ID does not exist" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.901142 5010 scope.go:117] "RemoveContainer" containerID="f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.901081 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a45c0f6-649b-4b48-8245-4f70da1c3a4f" path="/var/lib/kubelet/pods/0a45c0f6-649b-4b48-8245-4f70da1c3a4f/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 
15:52:55.901552 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c\": container with ID starting with f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c not found: ID does not exist" containerID="f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.901586 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c"} err="failed to get container status \"f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c\": rpc error: code = NotFound desc = could not find container \"f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c\": container with ID starting with f6648019ad773e5c84ad7c57482664cb378d5549145035e3db9a5129bbcae22c not found: ID does not exist" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.901607 5010 scope.go:117] "RemoveContainer" containerID="573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451" Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.902106 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451\": container with ID starting with 573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451 not found: ID does not exist" containerID="573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.902140 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451"} err="failed to get container status \"573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451\": rpc error: code = NotFound desc = could not find container \"573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451\": container with ID starting with 573a87575df86818d1bdc34627c8812741770fe843749cd72dc7a34c4bb05451 not found: ID does not exist" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.902170 5010 scope.go:117] "RemoveContainer" containerID="6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b" Nov 26 15:52:55 crc kubenswrapper[5010]: E1126 15:52:55.902436 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b\": container with ID starting with 6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b not found: ID does not exist" containerID="6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.902465 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b"} err="failed to get container status \"6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b\": rpc error: code = NotFound desc = could not find container \"6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b\": container with ID starting with 6eb59487bd99456d596db7fbe95199463d1fc0fce1135937d634f25850d4d26b not found: ID does not exist" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.903053 5010 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cfc9265-de84-4047-9e01-69444aa4d9f5" path="/var/lib/kubelet/pods/1cfc9265-de84-4047-9e01-69444aa4d9f5/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.903820 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1fe9c714-9055-4b2a-b417-f24e02a47fac" path="/var/lib/kubelet/pods/1fe9c714-9055-4b2a-b417-f24e02a47fac/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.904378 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e8dfd8a-0624-4f78-8c35-c6710328de9d" path="/var/lib/kubelet/pods/2e8dfd8a-0624-4f78-8c35-c6710328de9d/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.905614 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35439472-3a5f-450f-9fcc-2a739253ad5b" path="/var/lib/kubelet/pods/35439472-3a5f-450f-9fcc-2a739253ad5b/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.906389 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37e7e487-28ea-405b-a645-a85aa94e12d2" path="/var/lib/kubelet/pods/37e7e487-28ea-405b-a645-a85aa94e12d2/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.907042 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" path="/var/lib/kubelet/pods/3c00abcf-4e27-48ae-be52-a92cbd24957c/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.908147 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fa0a723-c228-4246-a4de-6718bd2be270" path="/var/lib/kubelet/pods/4fa0a723-c228-4246-a4de-6718bd2be270/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.917687 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62328141-677e-41a9-84ae-413c9b3ce15a" path="/var/lib/kubelet/pods/62328141-677e-41a9-84ae-413c9b3ce15a/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.920000 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7863fcf1-2cbd-44d2-8db8-bb9c896f70c4" path="/var/lib/kubelet/pods/7863fcf1-2cbd-44d2-8db8-bb9c896f70c4/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.920465 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e02370f-1b63-47f7-8d66-ba7c94310c38" path="/var/lib/kubelet/pods/7e02370f-1b63-47f7-8d66-ba7c94310c38/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.920938 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f3aae36-d899-446c-9cf0-9ee7c7218c98" path="/var/lib/kubelet/pods/7f3aae36-d899-446c-9cf0-9ee7c7218c98/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.923778 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" path="/var/lib/kubelet/pods/99fb2212-9383-48c9-b976-1e93a19c3ce1/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.924376 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d447aec-9b58-4184-8f6f-2b10d849d8c0" path="/var/lib/kubelet/pods/9d447aec-9b58-4184-8f6f-2b10d849d8c0/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.925304 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a36bfb95-ac5b-44ff-8b33-5f2e10ebea69" path="/var/lib/kubelet/pods/a36bfb95-ac5b-44ff-8b33-5f2e10ebea69/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.925996 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="a54e75fa-7b8b-4159-840f-983ec1a40e0d" path="/var/lib/kubelet/pods/a54e75fa-7b8b-4159-840f-983ec1a40e0d/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.926462 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" path="/var/lib/kubelet/pods/a9fc9e37-6c7d-45d8-81e2-c6a175467c12/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.927201 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae8d19c8-ae98-467d-b061-856521d7029d" path="/var/lib/kubelet/pods/ae8d19c8-ae98-467d-b061-856521d7029d/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.928340 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb0e3931-24cf-4410-98c7-74cba52c93ae" path="/var/lib/kubelet/pods/bb0e3931-24cf-4410-98c7-74cba52c93ae/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.928839 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbb49afd-179d-425f-aeb1-64a64c66fb98" path="/var/lib/kubelet/pods/bbb49afd-179d-425f-aeb1-64a64c66fb98/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.929390 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" path="/var/lib/kubelet/pods/ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.930378 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6093731-a529-4e5b-94bd-4948ab30cedc" path="/var/lib/kubelet/pods/d6093731-a529-4e5b-94bd-4948ab30cedc/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.930908 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e6ad10a8-9fed-45ae-830a-01f1b3147cae" path="/var/lib/kubelet/pods/e6ad10a8-9fed-45ae-830a-01f1b3147cae/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.931372 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8c11462-1366-4e0f-9003-6079b25c6b04" path="/var/lib/kubelet/pods/e8c11462-1366-4e0f-9003-6079b25c6b04/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.931952 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e962487c-09d8-4b78-aec6-6ed212c3bd75" path="/var/lib/kubelet/pods/e962487c-09d8-4b78-aec6-6ed212c3bd75/volumes" Nov 26 15:52:55 crc kubenswrapper[5010]: I1126 15:52:55.932836 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc74f571-aa5a-4030-800c-2945c869fdd5" path="/var/lib/kubelet/pods/fc74f571-aa5a-4030-800c-2945c869fdd5/volumes" Nov 26 15:52:56 crc kubenswrapper[5010]: I1126 15:52:56.070839 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:52:56 crc kubenswrapper[5010]: I1126 15:52:56.077551 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 15:52:56 crc kubenswrapper[5010]: I1126 15:52:56.891530 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:52:56 crc kubenswrapper[5010]: E1126 15:52:56.891879 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:52:57 crc kubenswrapper[5010]: I1126 15:52:57.901004 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" path="/var/lib/kubelet/pods/c1c3c42e-0126-41e6-9536-d5096eb44680/volumes" Nov 26 15:53:00 crc kubenswrapper[5010]: I1126 15:53:00.080655 5010 scope.go:117] "RemoveContainer" containerID="969966e67ae90d742a77f84466bf294b5b02f4399d3b508d206d36643320950e" Nov 26 15:53:00 crc kubenswrapper[5010]: I1126 15:53:00.123063 5010 scope.go:117] "RemoveContainer" containerID="b8ec1a9cdb303364bb02c0d64077536cd7666be13b0d6b8dbdc15f743fa920ab" Nov 26 15:53:00 crc kubenswrapper[5010]: I1126 15:53:00.149000 5010 scope.go:117] "RemoveContainer" containerID="8d6da5ddaf26f97f6c9fdf7571a9a6a9cbe1db0ea7499192d4c835fc1a8782bb" Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.374659 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.375039 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.375330 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.375368 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.376657 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.378253 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:00 crc kubenswrapper[5010]: 
E1126 15:53:00.379482 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:00 crc kubenswrapper[5010]: E1126 15:53:00.379521 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.375722 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.376470 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.376623 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.377014 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.377045 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.377637 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.379675 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: 
code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:05 crc kubenswrapper[5010]: E1126 15:53:05.379717 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.484057 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519502 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-public-tls-certs\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519576 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-ovndb-tls-certs\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519618 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-internal-tls-certs\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519655 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-combined-ca-bundle\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519680 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-config\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519758 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-httpd-config\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.519786 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w9bg\" (UniqueName: \"kubernetes.io/projected/5eee7686-f868-4e9e-bf61-b108eeb88bfa-kube-api-access-8w9bg\") pod \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\" (UID: \"5eee7686-f868-4e9e-bf61-b108eeb88bfa\") " Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.528836 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-httpd-config" (OuterVolumeSpecName: "httpd-config") 
pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.535202 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5eee7686-f868-4e9e-bf61-b108eeb88bfa-kube-api-access-8w9bg" (OuterVolumeSpecName: "kube-api-access-8w9bg") pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "kube-api-access-8w9bg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.568486 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.571034 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-config" (OuterVolumeSpecName: "config") pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.577458 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.582594 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.587373 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "5eee7686-f868-4e9e-bf61-b108eeb88bfa" (UID: "5eee7686-f868-4e9e-bf61-b108eeb88bfa"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.621296 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.621542 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.621626 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.621705 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.621815 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w9bg\" (UniqueName: \"kubernetes.io/projected/5eee7686-f868-4e9e-bf61-b108eeb88bfa-kube-api-access-8w9bg\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.621944 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.622021 5010 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/5eee7686-f868-4e9e-bf61-b108eeb88bfa-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.873941 5010 generic.go:334] "Generic (PLEG): container finished" podID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerID="7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b" exitCode=0 Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.874006 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9c764c5c-5p8zc" event={"ID":"5eee7686-f868-4e9e-bf61-b108eeb88bfa","Type":"ContainerDied","Data":"7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b"} Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.874017 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5c9c764c5c-5p8zc" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.874058 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c9c764c5c-5p8zc" event={"ID":"5eee7686-f868-4e9e-bf61-b108eeb88bfa","Type":"ContainerDied","Data":"c6ee01db2fd8acea0588e24b22ccc2540c42999b595eab2cf463d2e2b9ee5e22"} Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.874088 5010 scope.go:117] "RemoveContainer" containerID="0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.921433 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5c9c764c5c-5p8zc"] Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.923790 5010 scope.go:117] "RemoveContainer" containerID="7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.941277 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5c9c764c5c-5p8zc"] Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.989646 5010 scope.go:117] "RemoveContainer" containerID="0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd" Nov 26 15:53:08 crc kubenswrapper[5010]: E1126 15:53:08.990446 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd\": container with ID starting with 0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd not found: ID does not exist" containerID="0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.990516 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd"} err="failed to get container status \"0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd\": rpc error: code = NotFound desc = could not find container \"0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd\": container with ID starting with 0f4f6370050fb868b61f1cd115d29952c8d934becfdfc2957652c9d7565744cd not found: ID does not exist" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.990548 5010 scope.go:117] "RemoveContainer" containerID="7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b" Nov 26 15:53:08 crc kubenswrapper[5010]: E1126 15:53:08.991029 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b\": container with ID starting with 7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b not found: ID does not exist" containerID="7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b" Nov 26 15:53:08 crc kubenswrapper[5010]: I1126 15:53:08.991074 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b"} err="failed to get container status \"7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b\": rpc error: code = NotFound desc = could not find container \"7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b\": container with ID starting with 7f8c19e1a136a700c618a7610fc7ad9906a80ee675500f9e69cebc8a5dd7e03b not found: ID does not exist" Nov 26 15:53:09 crc 
kubenswrapper[5010]: I1126 15:53:09.900738 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" path="/var/lib/kubelet/pods/5eee7686-f868-4e9e-bf61-b108eeb88bfa/volumes" Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.375749 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.376902 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.377316 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.377358 5010 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.378278 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.379638 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.381370 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Nov 26 15:53:10 crc kubenswrapper[5010]: E1126 15:53:10.381494 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
probeType="Readiness" pod="openstack/ovn-controller-ovs-f7n92" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:53:11 crc kubenswrapper[5010]: I1126 15:53:11.892306 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:53:11 crc kubenswrapper[5010]: E1126 15:53:11.893112 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:53:14 crc kubenswrapper[5010]: I1126 15:53:14.941251 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-f7n92_d1c2d398-f284-40d9-beb4-cd3121568f5a/ovs-vswitchd/0.log" Nov 26 15:53:14 crc kubenswrapper[5010]: I1126 15:53:14.942721 5010 generic.go:334] "Generic (PLEG): container finished" podID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" exitCode=137 Nov 26 15:53:14 crc kubenswrapper[5010]: I1126 15:53:14.942751 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerDied","Data":"5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5"} Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.067789 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-f7n92_d1c2d398-f284-40d9-beb4-cd3121568f5a/ovs-vswitchd/0.log" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.068764 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227561 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-lib\") pod \"d1c2d398-f284-40d9-beb4-cd3121568f5a\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227724 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-etc-ovs\") pod \"d1c2d398-f284-40d9-beb4-cd3121568f5a\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227729 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-lib" (OuterVolumeSpecName: "var-lib") pod "d1c2d398-f284-40d9-beb4-cd3121568f5a" (UID: "d1c2d398-f284-40d9-beb4-cd3121568f5a"). InnerVolumeSpecName "var-lib". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227789 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94cm8\" (UniqueName: \"kubernetes.io/projected/d1c2d398-f284-40d9-beb4-cd3121568f5a-kube-api-access-94cm8\") pod \"d1c2d398-f284-40d9-beb4-cd3121568f5a\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227836 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-run\") pod \"d1c2d398-f284-40d9-beb4-cd3121568f5a\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227853 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "d1c2d398-f284-40d9-beb4-cd3121568f5a" (UID: "d1c2d398-f284-40d9-beb4-cd3121568f5a"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.227979 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-log\") pod \"d1c2d398-f284-40d9-beb4-cd3121568f5a\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228005 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d1c2d398-f284-40d9-beb4-cd3121568f5a-scripts\") pod \"d1c2d398-f284-40d9-beb4-cd3121568f5a\" (UID: \"d1c2d398-f284-40d9-beb4-cd3121568f5a\") " Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228033 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-run" (OuterVolumeSpecName: "var-run") pod "d1c2d398-f284-40d9-beb4-cd3121568f5a" (UID: "d1c2d398-f284-40d9-beb4-cd3121568f5a"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228098 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-log" (OuterVolumeSpecName: "var-log") pod "d1c2d398-f284-40d9-beb4-cd3121568f5a" (UID: "d1c2d398-f284-40d9-beb4-cd3121568f5a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228375 5010 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228399 5010 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-lib\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228412 5010 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-etc-ovs\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.228423 5010 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d1c2d398-f284-40d9-beb4-cd3121568f5a-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.229251 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1c2d398-f284-40d9-beb4-cd3121568f5a-scripts" (OuterVolumeSpecName: "scripts") pod "d1c2d398-f284-40d9-beb4-cd3121568f5a" (UID: "d1c2d398-f284-40d9-beb4-cd3121568f5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.233896 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1c2d398-f284-40d9-beb4-cd3121568f5a-kube-api-access-94cm8" (OuterVolumeSpecName: "kube-api-access-94cm8") pod "d1c2d398-f284-40d9-beb4-cd3121568f5a" (UID: "d1c2d398-f284-40d9-beb4-cd3121568f5a"). InnerVolumeSpecName "kube-api-access-94cm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.329624 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94cm8\" (UniqueName: \"kubernetes.io/projected/d1c2d398-f284-40d9-beb4-cd3121568f5a-kube-api-access-94cm8\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.329679 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d1c2d398-f284-40d9-beb4-cd3121568f5a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.952747 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-f7n92_d1c2d398-f284-40d9-beb4-cd3121568f5a/ovs-vswitchd/0.log" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.953770 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-f7n92" event={"ID":"d1c2d398-f284-40d9-beb4-cd3121568f5a","Type":"ContainerDied","Data":"22f32d683476f41ee20dbaa98c03ee0e656f0251623e4f6aa417942bbbef2634"} Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.953834 5010 scope.go:117] "RemoveContainer" containerID="5e02166669370fe08d771626894f3b70a15f7397978568ef5ffd4111213444d5" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.953884 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-f7n92" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.963451 5010 generic.go:334] "Generic (PLEG): container finished" podID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerID="e08bba5d0a854ba8aa4fb7af34e20011b90f803f2ae0c820fde74890a8ed506d" exitCode=137 Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.963493 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"e08bba5d0a854ba8aa4fb7af34e20011b90f803f2ae0c820fde74890a8ed506d"} Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.984848 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-f7n92"] Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.991564 5010 scope.go:117] "RemoveContainer" containerID="b5812a17c7342c0455e9b88a0d3eaf53133e8bf225b40e8686e9544512e8ac0c" Nov 26 15:53:15 crc kubenswrapper[5010]: I1126 15:53:15.993513 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-f7n92"] Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.015783 5010 scope.go:117] "RemoveContainer" containerID="684a7192db7883e6dfeb8517a5b35048195da02842af78b06b5df20e3d3d7f64" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.165331 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.243157 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.243255 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gmddg\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-kube-api-access-gmddg\") pod \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.243296 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-lock\") pod \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.243343 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-cache\") pod \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.243363 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") pod \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\" (UID: \"1803fc99-2cc8-44e7-8ce5-eac5bc548f88\") " Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.243843 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-lock" (OuterVolumeSpecName: "lock") pod "1803fc99-2cc8-44e7-8ce5-eac5bc548f88" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88"). InnerVolumeSpecName "lock". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.244024 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-cache" (OuterVolumeSpecName: "cache") pod "1803fc99-2cc8-44e7-8ce5-eac5bc548f88" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88"). InnerVolumeSpecName "cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.250865 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "swift") pod "1803fc99-2cc8-44e7-8ce5-eac5bc548f88" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.250942 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-kube-api-access-gmddg" (OuterVolumeSpecName: "kube-api-access-gmddg") pod "1803fc99-2cc8-44e7-8ce5-eac5bc548f88" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88"). InnerVolumeSpecName "kube-api-access-gmddg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.250979 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "1803fc99-2cc8-44e7-8ce5-eac5bc548f88" (UID: "1803fc99-2cc8-44e7-8ce5-eac5bc548f88"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.345056 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.345102 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gmddg\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-kube-api-access-gmddg\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.345115 5010 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-lock\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.345125 5010 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-cache\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.345134 5010 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/1803fc99-2cc8-44e7-8ce5-eac5bc548f88-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.371169 5010 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.446782 5010 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 26 15:53:16 
crc kubenswrapper[5010]: I1126 15:53:16.985588 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"1803fc99-2cc8-44e7-8ce5-eac5bc548f88","Type":"ContainerDied","Data":"3d1170e746304bb58d50df547965f6cd5bb72f3a501665cfdb4dafcff6dcc456"} Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.987883 5010 scope.go:117] "RemoveContainer" containerID="e08bba5d0a854ba8aa4fb7af34e20011b90f803f2ae0c820fde74890a8ed506d" Nov 26 15:53:16 crc kubenswrapper[5010]: I1126 15:53:16.985632 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.030074 5010 scope.go:117] "RemoveContainer" containerID="956193edff3817c0a6aaac66e75e2a2cbc0c70d7f96f5cf29968a35548725373" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.038851 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.045688 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.054022 5010 scope.go:117] "RemoveContainer" containerID="2aac1aac86049fceb0d32a0aa7530aacebb03989a907a006110f6386991013b9" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.077473 5010 scope.go:117] "RemoveContainer" containerID="b290a5f7ec51985b250b6f158fb41d40ac9ddeab529cc0032fbce6f190f4fde3" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.099080 5010 scope.go:117] "RemoveContainer" containerID="75089565aaa9cf8b99c1bbb2c38ff4c538bc9761ad1f7d65a1db0333de3c360e" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.118857 5010 scope.go:117] "RemoveContainer" containerID="1227084a08d26738373e26d1eaa54ec1c0e0c92d3d3601f6a05af2770c69551e" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.136963 5010 scope.go:117] "RemoveContainer" containerID="ec35df7082d1bd361a74495ef68869fe5465b44b7de7cab15bbe9c7d46d0924f" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.154108 5010 scope.go:117] "RemoveContainer" containerID="e0678f8f20e1d205632c07cb24e8ce9e89576b47c8ef44f378b9a0a0dfb4ed62" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.181578 5010 scope.go:117] "RemoveContainer" containerID="4b0402574e5cf70154b6681989bbdbd847b3e31c0811a89c6cfcc7aaf711a5f1" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.199842 5010 scope.go:117] "RemoveContainer" containerID="6d794e589ecc207f0a022410f47d3aa359d8e1b3c5503eda2b2b369e69a171cf" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.217075 5010 scope.go:117] "RemoveContainer" containerID="18b9616512ee9afb2cfc002c2a3a4b7c6722774ff0238f548f51aad7f1e695a8" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.236139 5010 scope.go:117] "RemoveContainer" containerID="2315d69e082e6c260094225fe89d5d8817821a2dcf66915354208ff345c9a274" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.251859 5010 scope.go:117] "RemoveContainer" containerID="df9dfb68b38080d2f2517a40a46d8ae91eb3eca11c141ff220a21e22ce48690a" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.268115 5010 scope.go:117] "RemoveContainer" containerID="a92b03349dda704cc51977b5cdd2fcdd40871b506d74796925290a6da4ceb86e" Nov 26 15:53:17 crc kubenswrapper[5010]: I1126 15:53:17.291703 5010 scope.go:117] "RemoveContainer" containerID="55e059be841df7938e11264822fec73874738f64ef6b875efb95510e6965cf1b" Nov 26 15:53:18 crc kubenswrapper[5010]: I1126 15:53:18.078639 5010 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" path="/var/lib/kubelet/pods/1803fc99-2cc8-44e7-8ce5-eac5bc548f88/volumes" Nov 26 15:53:18 crc kubenswrapper[5010]: I1126 15:53:18.082864 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" path="/var/lib/kubelet/pods/d1c2d398-f284-40d9-beb4-cd3121568f5a/volumes" Nov 26 15:53:18 crc kubenswrapper[5010]: I1126 15:53:18.542491 5010 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod776a1766-4e7d-4ea0-bd5b-18b6b352448a"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod776a1766-4e7d-4ea0-bd5b-18b6b352448a] : Timed out while waiting for systemd to remove kubepods-besteffort-pod776a1766_4e7d_4ea0_bd5b_18b6b352448a.slice" Nov 26 15:53:25 crc kubenswrapper[5010]: I1126 15:53:25.894883 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:53:25 crc kubenswrapper[5010]: E1126 15:53:25.896152 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:53:39 crc kubenswrapper[5010]: I1126 15:53:39.901554 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:53:39 crc kubenswrapper[5010]: E1126 15:53:39.902269 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:53:51 crc kubenswrapper[5010]: I1126 15:53:51.891765 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:53:51 crc kubenswrapper[5010]: E1126 15:53:51.892586 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.791909 5010 scope.go:117] "RemoveContainer" containerID="16e4286df9dc4da934acb63de73bc22d543350bd8f03c6db3553cb0bf829316f" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.832163 5010 scope.go:117] "RemoveContainer" containerID="167726dce406cc31ff231ebe5b4368ed3c91a0e2e0f481ef182f176efe9ce00d" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.863052 5010 scope.go:117] "RemoveContainer" containerID="8c6358d9a35379339618996fe36ec60f4eec6869c7729dabfb9f4d68803d62e7" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.894054 5010 scope.go:117] "RemoveContainer" 
containerID="cc1a141c6ceffeefcff4849801770b463e32e9bcbe7a5f8c4d3291a2d50429bb" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.934026 5010 scope.go:117] "RemoveContainer" containerID="dc1d1e75ba8a78442646d771e9dd84547cd380c72a7be91078c4b34fb518725a" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.958960 5010 scope.go:117] "RemoveContainer" containerID="237dca687157c8a51ce31a5eeb1e512bb81f5f3d888205c08f85c049cb9d9522" Nov 26 15:54:00 crc kubenswrapper[5010]: I1126 15:54:00.993865 5010 scope.go:117] "RemoveContainer" containerID="5adeb336a30ef297498044077e878f62a25ad2a35cf050ca984209cff33f4a8f" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.015495 5010 scope.go:117] "RemoveContainer" containerID="7919c80b2f17a78bdb933b9cc3b17600120fda6ef4e7cec81c2f04537e3abc35" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.043886 5010 scope.go:117] "RemoveContainer" containerID="6c4760ad1a714105713a33fea21344eef7544128a9495f5d5c75c58396e438da" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.065367 5010 scope.go:117] "RemoveContainer" containerID="6eb836eb928608159721296c8ca9e0e1446f9f961fc14b0267fa617ac7a36cfc" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.098137 5010 scope.go:117] "RemoveContainer" containerID="82c66190a4a384271219bce14f7eecdaa1199745d98b7a809afea4bb7e1faf9b" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.130098 5010 scope.go:117] "RemoveContainer" containerID="d4472dd857fa51dd467e2749e5440204642d3267b40603be780d958033a111c7" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.152385 5010 scope.go:117] "RemoveContainer" containerID="607a96fde7603b28f08e865d2b7295908a4e57642edd8a9f9b3bbac6fb68fb8e" Nov 26 15:54:01 crc kubenswrapper[5010]: I1126 15:54:01.182419 5010 scope.go:117] "RemoveContainer" containerID="e67acf107946bb91553ca398217826e217c2c0fbd53a70c45a76896f6169a43f" Nov 26 15:54:05 crc kubenswrapper[5010]: I1126 15:54:05.891373 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:54:05 crc kubenswrapper[5010]: E1126 15:54:05.892063 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:54:16 crc kubenswrapper[5010]: I1126 15:54:16.892168 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:54:16 crc kubenswrapper[5010]: E1126 15:54:16.893143 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:54:27 crc kubenswrapper[5010]: I1126 15:54:27.892502 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:54:27 crc kubenswrapper[5010]: E1126 15:54:27.893875 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:54:39 crc kubenswrapper[5010]: I1126 15:54:39.900954 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:54:39 crc kubenswrapper[5010]: E1126 15:54:39.904236 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:54:50 crc kubenswrapper[5010]: I1126 15:54:50.891921 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:54:50 crc kubenswrapper[5010]: E1126 15:54:50.892844 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:55:01 crc kubenswrapper[5010]: I1126 15:55:01.472027 5010 scope.go:117] "RemoveContainer" containerID="333b57ad505174d4546e1edf9a1b810ad396148113c96a3f290db461a21adfb1" Nov 26 15:55:01 crc kubenswrapper[5010]: I1126 15:55:01.532067 5010 scope.go:117] "RemoveContainer" containerID="ed3fcfd8e224708b247b12d8cfd27e62d3ef1808e4f71da44902a4741970224a" Nov 26 15:55:01 crc kubenswrapper[5010]: I1126 15:55:01.594273 5010 scope.go:117] "RemoveContainer" containerID="16c3ef01ba3fcc61ab3efc7446b80803148d81f97d6e11a28601111c5763f722" Nov 26 15:55:01 crc kubenswrapper[5010]: I1126 15:55:01.646345 5010 scope.go:117] "RemoveContainer" containerID="eef72c8ede1b706c5fd5317ea82d316b542309243d7659a4a8f9d540f0aeff1c" Nov 26 15:55:01 crc kubenswrapper[5010]: I1126 15:55:01.699664 5010 scope.go:117] "RemoveContainer" containerID="6bc9f8469188874f795d80b8019d6100b4a9920b1743b2e5c4b536a83dc31f8e" Nov 26 15:55:01 crc kubenswrapper[5010]: I1126 15:55:01.728809 5010 scope.go:117] "RemoveContainer" containerID="bb62a936b79835b6c73ac07392dd96fd3fc5d2d4ac67dcace4873b04bd1fc9b7" Nov 26 15:55:05 crc kubenswrapper[5010]: I1126 15:55:05.892001 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:55:05 crc kubenswrapper[5010]: E1126 15:55:05.893608 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:55:18 crc kubenswrapper[5010]: I1126 15:55:18.892080 5010 scope.go:117] "RemoveContainer" 
containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:55:18 crc kubenswrapper[5010]: E1126 15:55:18.893074 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:55:32 crc kubenswrapper[5010]: I1126 15:55:32.892371 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:55:32 crc kubenswrapper[5010]: E1126 15:55:32.893141 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:55:45 crc kubenswrapper[5010]: I1126 15:55:45.892516 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:55:45 crc kubenswrapper[5010]: E1126 15:55:45.896174 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:56:00 crc kubenswrapper[5010]: I1126 15:56:00.891286 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:56:00 crc kubenswrapper[5010]: E1126 15:56:00.892315 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:56:01 crc kubenswrapper[5010]: I1126 15:56:01.885903 5010 scope.go:117] "RemoveContainer" containerID="85ff0b0e3b7dd434e6128d567eeeeec11fd8ac7b2d055a682903e7034e63280c" Nov 26 15:56:01 crc kubenswrapper[5010]: I1126 15:56:01.922258 5010 scope.go:117] "RemoveContainer" containerID="e2648f8ab5e19664d085c18600c5012a94491dcb187ee60e1e3570ec0f86cc22" Nov 26 15:56:02 crc kubenswrapper[5010]: I1126 15:56:02.035298 5010 scope.go:117] "RemoveContainer" containerID="4248baa6552f5c6e89014c848cb17cfc27c39511119002d3f8686be69996f6fe" Nov 26 15:56:02 crc kubenswrapper[5010]: I1126 15:56:02.060828 5010 scope.go:117] "RemoveContainer" containerID="f5c52d88f44b865b5096b50805d5e2f59cef8516541529d9fdf1ac840da5d9c1" Nov 26 15:56:02 crc kubenswrapper[5010]: I1126 15:56:02.102072 5010 scope.go:117] "RemoveContainer" containerID="79c2fb1f54ff9a95cc4dcaa6cb5962ca09af991addf2ded2b30b24b2b7bbdfbe" Nov 26 15:56:02 crc kubenswrapper[5010]: I1126 15:56:02.130180 5010 
scope.go:117] "RemoveContainer" containerID="b34d4f722a91c454494472df034a2abe16ed2231c30fd284d1678d1f21a2a6d7" Nov 26 15:56:02 crc kubenswrapper[5010]: I1126 15:56:02.149041 5010 scope.go:117] "RemoveContainer" containerID="9b495458e612128334395409d752e36730b0d6b39ff6af3ed4daa774634efdd7" Nov 26 15:56:02 crc kubenswrapper[5010]: I1126 15:56:02.167560 5010 scope.go:117] "RemoveContainer" containerID="c9e20dc7a4328ee3612ea58196285a255ba3e587797e6fd20342a957928a6a6b" Nov 26 15:56:11 crc kubenswrapper[5010]: I1126 15:56:11.891670 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:56:11 crc kubenswrapper[5010]: E1126 15:56:11.892430 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:56:23 crc kubenswrapper[5010]: I1126 15:56:23.891876 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:56:23 crc kubenswrapper[5010]: E1126 15:56:23.892762 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:56:34 crc kubenswrapper[5010]: I1126 15:56:34.891867 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:56:34 crc kubenswrapper[5010]: E1126 15:56:34.892657 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 15:56:48 crc kubenswrapper[5010]: I1126 15:56:48.891633 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 15:56:49 crc kubenswrapper[5010]: I1126 15:56:49.197906 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"238c759f629d5116acbbb07eb94cd9109f06028f6b7ad27094247fc4ae32e555"} Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.292057 5010 scope.go:117] "RemoveContainer" containerID="0cfd653091e91e0ee452317bab6a6a58cc99cd5b9eff404b2e0356747e85fbfa" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.321558 5010 scope.go:117] "RemoveContainer" containerID="11b2381ddcc7f6d25f34d85e987cdc506d5085296b8785658e79bf5262db374c" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.382853 5010 scope.go:117] "RemoveContainer" containerID="0f45d7eea2abefc4913d1a7e50d9d1584398760133b14b1ec28110f9bc2a3322" Nov 26 15:57:02 
crc kubenswrapper[5010]: I1126 15:57:02.402037 5010 scope.go:117] "RemoveContainer" containerID="6806ec765a15b121cc35d21ba23121403bf62a62cbc3ac67abb3c89a0321b696" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.462503 5010 scope.go:117] "RemoveContainer" containerID="bd5dcd159d98522a1fa453a1295f760782d174209dffc2ff416e800b67e207f6" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.483323 5010 scope.go:117] "RemoveContainer" containerID="8e37e04d466ca260e3f8f7ff81def050743b2ca8bf2294353ecc4ab35c6e4e5f" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.507892 5010 scope.go:117] "RemoveContainer" containerID="653bfa6b3829db91148e50d25975da256293544493c1a848948cdc245d1ddb31" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.554594 5010 scope.go:117] "RemoveContainer" containerID="1c95b91556706bc0e785fef8d7a8aa8557546e4cb115de25ca1349b5cc34538f" Nov 26 15:57:02 crc kubenswrapper[5010]: I1126 15:57:02.581284 5010 scope.go:117] "RemoveContainer" containerID="9e380480e0ed87dc131e2bebbe255dfbe9ddfc2684fa87e255d79aa89c5c350c" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.736531 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jztnl"] Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737429 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerName="nova-scheduler-scheduler" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737442 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerName="nova-scheduler-scheduler" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737452 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737458 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovs-vswitchd" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737476 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737482 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737493 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" containerName="kube-state-metrics" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737499 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" containerName="kube-state-metrics" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737517 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6093731-a529-4e5b-94bd-4948ab30cedc" containerName="keystone-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737524 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6093731-a529-4e5b-94bd-4948ab30cedc" containerName="keystone-api" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737532 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737537 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737546 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-expirer" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737551 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-expirer" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737562 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737568 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-log" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737580 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="rabbitmq" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737586 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="rabbitmq" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737592 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737597 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737604 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="rabbitmq" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737610 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="rabbitmq" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737619 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737624 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener-log" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737637 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e02370f-1b63-47f7-8d66-ba7c94310c38" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737643 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e02370f-1b63-47f7-8d66-ba7c94310c38" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737652 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737659 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-server" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737669 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737675 5010 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737682 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server-init" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737688 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server-init" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737698 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737783 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-server" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737790 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerName="nova-cell0-conductor-conductor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737795 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerName="nova-cell0-conductor-conductor" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737805 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737811 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737818 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="swift-recon-cron" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737826 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="swift-recon-cron" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737835 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="setup-container" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737842 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="setup-container" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737851 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="galera" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737857 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="galera" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737863 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737870 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737877 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 
15:57:07.737884 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-api" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737892 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-reaper" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737898 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-reaper" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737906 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-updater" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737912 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-updater" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737920 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737926 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-server" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737933 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737940 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api-log" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737947 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737953 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737962 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="rsync" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737968 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="rsync" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737976 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cfc9265-de84-4047-9e01-69444aa4d9f5" containerName="nova-cell1-conductor-conductor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737982 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cfc9265-de84-4047-9e01-69444aa4d9f5" containerName="nova-cell1-conductor-conductor" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.737990 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.737996 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738018 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener" Nov 26 
15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738024 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738036 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37e7e487-28ea-405b-a645-a85aa94e12d2" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738042 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="37e7e487-28ea-405b-a645-a85aa94e12d2" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738052 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738058 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738067 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-updater" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738072 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-updater" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738080 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6243a3e1-835d-4150-afea-1f2bb0032065" containerName="memcached" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738088 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6243a3e1-835d-4150-afea-1f2bb0032065" containerName="memcached" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738097 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738103 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738114 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738120 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738131 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e8dfd8a-0624-4f78-8c35-c6710328de9d" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738137 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e8dfd8a-0624-4f78-8c35-c6710328de9d" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738145 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738152 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738162 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="35439472-3a5f-450f-9fcc-2a739253ad5b" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738169 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="35439472-3a5f-450f-9fcc-2a739253ad5b" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738180 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="mysql-bootstrap" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738187 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="mysql-bootstrap" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738196 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738202 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker-log" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738212 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-notification-agent" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738218 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-notification-agent" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738224 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-central-agent" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738229 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-central-agent" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738239 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738244 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738255 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738260 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api-log" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738269 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a45c0f6-649b-4b48-8245-4f70da1c3a4f" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738275 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a45c0f6-649b-4b48-8245-4f70da1c3a4f" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738287 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="proxy-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738293 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="proxy-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 
15:57:07.738303 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="setup-container" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738309 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="setup-container" Nov 26 15:57:07 crc kubenswrapper[5010]: E1126 15:57:07.738319 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="sg-core" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738325 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="sg-core" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738454 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738467 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-expirer" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738474 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed209eb8-b2b9-4101-9eda-2762259ea2cd" containerName="kube-state-metrics" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738481 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738489 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9940cbe6-c323-4320-9e45-463e5c023156" containerName="rabbitmq" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738496 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6093731-a529-4e5b-94bd-4948ab30cedc" containerName="keystone-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738504 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738514 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738522 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738532 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738544 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" containerName="ovsdb-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738553 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9687c9f4-9131-4c43-a1f2-2faf3040e499" containerName="barbican-keystone-listener" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738560 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5eee7686-f868-4e9e-bf61-b108eeb88bfa" containerName="neutron-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738567 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1c2d398-f284-40d9-beb4-cd3121568f5a" 
containerName="ovs-vswitchd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738576 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a45c0f6-649b-4b48-8245-4f70da1c3a4f" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738586 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5da7cc9-18f2-4dc5-a431-9ffc2d8f9d25" containerName="rabbitmq" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738596 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c00abcf-4e27-48ae-be52-a92cbd24957c" containerName="nova-cell0-conductor-conductor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738604 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738614 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738622 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-reaper" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738631 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="sg-core" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738639 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="35439472-3a5f-450f-9fcc-2a739253ad5b" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738648 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738655 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9fc9e37-6c7d-45d8-81e2-c6a175467c12" containerName="nova-scheduler-scheduler" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738663 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738669 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e02370f-1b63-47f7-8d66-ba7c94310c38" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738677 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6243a3e1-835d-4150-afea-1f2bb0032065" containerName="memcached" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738685 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="rsync" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738691 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="swift-recon-cron" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738698 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="99fb2212-9383-48c9-b976-1e93a19c3ce1" containerName="galera" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738726 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-updater" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738736 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="object-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738746 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-auditor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738755 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c646c6a9-d38e-4fe2-9fac-2ca0fe9e056a" containerName="barbican-worker-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738765 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe931cd2-6e31-4e82-a617-f028019a60c4" containerName="glance-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738773 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-central-agent" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738779 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="ceilometer-notification-agent" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738788 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e65ad49-eec3-460d-aa80-0880c5e2e86b" containerName="barbican-api-log" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738795 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-server" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738826 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cfc9265-de84-4047-9e01-69444aa4d9f5" containerName="nova-cell1-conductor-conductor" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738835 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e8dfd8a-0624-4f78-8c35-c6710328de9d" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738843 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c3c42e-0126-41e6-9536-d5096eb44680" containerName="proxy-httpd" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738850 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="58a117e9-40a2-43bc-b52b-6bbfdd0f45dd" containerName="cinder-api" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738860 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738866 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="container-updater" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738876 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccff7261-b3b5-4ed4-9f4f-76be8cb5a3f5" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738884 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="37e7e487-28ea-405b-a645-a85aa94e12d2" containerName="mariadb-account-delete" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.738893 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1803fc99-2cc8-44e7-8ce5-eac5bc548f88" containerName="account-replicator" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.739913 5010 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.756845 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jztnl"] Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.849610 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-utilities\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.849687 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78vbg\" (UniqueName: \"kubernetes.io/projected/59b35b26-8912-41df-aff4-b4a4055c39f8-kube-api-access-78vbg\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.849725 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-catalog-content\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.950926 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-utilities\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.951038 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78vbg\" (UniqueName: \"kubernetes.io/projected/59b35b26-8912-41df-aff4-b4a4055c39f8-kube-api-access-78vbg\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.951058 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-catalog-content\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.951451 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-utilities\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.951531 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-catalog-content\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:07 crc kubenswrapper[5010]: I1126 15:57:07.975784 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78vbg\" (UniqueName: \"kubernetes.io/projected/59b35b26-8912-41df-aff4-b4a4055c39f8-kube-api-access-78vbg\") pod \"community-operators-jztnl\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:08 crc kubenswrapper[5010]: I1126 15:57:08.103451 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:08 crc kubenswrapper[5010]: I1126 15:57:08.592265 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jztnl"] Nov 26 15:57:08 crc kubenswrapper[5010]: W1126 15:57:08.603148 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59b35b26_8912_41df_aff4_b4a4055c39f8.slice/crio-a835b5df6dd41db5c4616119f4f085a191235468f8c3b15bc88c3a48a562c00e WatchSource:0}: Error finding container a835b5df6dd41db5c4616119f4f085a191235468f8c3b15bc88c3a48a562c00e: Status 404 returned error can't find the container with id a835b5df6dd41db5c4616119f4f085a191235468f8c3b15bc88c3a48a562c00e Nov 26 15:57:09 crc kubenswrapper[5010]: I1126 15:57:09.403876 5010 generic.go:334] "Generic (PLEG): container finished" podID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerID="9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce" exitCode=0 Nov 26 15:57:09 crc kubenswrapper[5010]: I1126 15:57:09.404003 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerDied","Data":"9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce"} Nov 26 15:57:09 crc kubenswrapper[5010]: I1126 15:57:09.404450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerStarted","Data":"a835b5df6dd41db5c4616119f4f085a191235468f8c3b15bc88c3a48a562c00e"} Nov 26 15:57:09 crc kubenswrapper[5010]: I1126 15:57:09.407278 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 15:57:10 crc kubenswrapper[5010]: I1126 15:57:10.414898 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerStarted","Data":"bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776"} Nov 26 15:57:11 crc kubenswrapper[5010]: I1126 15:57:11.425360 5010 generic.go:334] "Generic (PLEG): container finished" podID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerID="bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776" exitCode=0 Nov 26 15:57:11 crc kubenswrapper[5010]: I1126 15:57:11.425406 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerDied","Data":"bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776"} Nov 26 15:57:12 crc kubenswrapper[5010]: I1126 15:57:12.436365 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerStarted","Data":"7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62"} Nov 26 15:57:12 crc 
kubenswrapper[5010]: I1126 15:57:12.467103 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jztnl" podStartSLOduration=2.991965134 podStartE2EDuration="5.467086128s" podCreationTimestamp="2025-11-26 15:57:07 +0000 UTC" firstStartedPulling="2025-11-26 15:57:09.407061951 +0000 UTC m=+1850.197779099" lastFinishedPulling="2025-11-26 15:57:11.882182925 +0000 UTC m=+1852.672900093" observedRunningTime="2025-11-26 15:57:12.461234552 +0000 UTC m=+1853.251951730" watchObservedRunningTime="2025-11-26 15:57:12.467086128 +0000 UTC m=+1853.257803276" Nov 26 15:57:18 crc kubenswrapper[5010]: I1126 15:57:18.103880 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:18 crc kubenswrapper[5010]: I1126 15:57:18.104911 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:18 crc kubenswrapper[5010]: I1126 15:57:18.176048 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:18 crc kubenswrapper[5010]: I1126 15:57:18.568508 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:18 crc kubenswrapper[5010]: I1126 15:57:18.633035 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jztnl"] Nov 26 15:57:20 crc kubenswrapper[5010]: I1126 15:57:20.522053 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jztnl" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="registry-server" containerID="cri-o://7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62" gracePeriod=2 Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.044997 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.161285 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-catalog-content\") pod \"59b35b26-8912-41df-aff4-b4a4055c39f8\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.161479 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78vbg\" (UniqueName: \"kubernetes.io/projected/59b35b26-8912-41df-aff4-b4a4055c39f8-kube-api-access-78vbg\") pod \"59b35b26-8912-41df-aff4-b4a4055c39f8\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.161641 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-utilities\") pod \"59b35b26-8912-41df-aff4-b4a4055c39f8\" (UID: \"59b35b26-8912-41df-aff4-b4a4055c39f8\") " Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.162618 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-utilities" (OuterVolumeSpecName: "utilities") pod "59b35b26-8912-41df-aff4-b4a4055c39f8" (UID: "59b35b26-8912-41df-aff4-b4a4055c39f8"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.166973 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59b35b26-8912-41df-aff4-b4a4055c39f8-kube-api-access-78vbg" (OuterVolumeSpecName: "kube-api-access-78vbg") pod "59b35b26-8912-41df-aff4-b4a4055c39f8" (UID: "59b35b26-8912-41df-aff4-b4a4055c39f8"). InnerVolumeSpecName "kube-api-access-78vbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.229903 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "59b35b26-8912-41df-aff4-b4a4055c39f8" (UID: "59b35b26-8912-41df-aff4-b4a4055c39f8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.263331 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.263378 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/59b35b26-8912-41df-aff4-b4a4055c39f8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.263397 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78vbg\" (UniqueName: \"kubernetes.io/projected/59b35b26-8912-41df-aff4-b4a4055c39f8-kube-api-access-78vbg\") on node \"crc\" DevicePath \"\"" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.538000 5010 generic.go:334] "Generic (PLEG): container finished" podID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerID="7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62" exitCode=0 Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.538078 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerDied","Data":"7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62"} Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.538098 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jztnl" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.538137 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jztnl" event={"ID":"59b35b26-8912-41df-aff4-b4a4055c39f8","Type":"ContainerDied","Data":"a835b5df6dd41db5c4616119f4f085a191235468f8c3b15bc88c3a48a562c00e"} Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.538177 5010 scope.go:117] "RemoveContainer" containerID="7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.580193 5010 scope.go:117] "RemoveContainer" containerID="bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.603024 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jztnl"] Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.610675 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jztnl"] Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.621958 5010 scope.go:117] "RemoveContainer" containerID="9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.638874 5010 scope.go:117] "RemoveContainer" containerID="7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62" Nov 26 15:57:21 crc kubenswrapper[5010]: E1126 15:57:21.639377 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62\": container with ID starting with 7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62 not found: ID does not exist" containerID="7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.639419 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62"} err="failed to get container status \"7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62\": rpc error: code = NotFound desc = could not find container \"7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62\": container with ID starting with 7f0bfb74494194d652aafc4c1132f0f42a4e86f15ef3b2d2629ef49dc6f31d62 not found: ID does not exist" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.639446 5010 scope.go:117] "RemoveContainer" containerID="bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776" Nov 26 15:57:21 crc kubenswrapper[5010]: E1126 15:57:21.639843 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776\": container with ID starting with bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776 not found: ID does not exist" containerID="bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.639893 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776"} err="failed to get container status \"bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776\": rpc error: code = NotFound desc = could not find 
container \"bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776\": container with ID starting with bf5db526032fc8ada4f10413384c9390303688b99a880ac5a4f247ef8e2f3776 not found: ID does not exist" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.639928 5010 scope.go:117] "RemoveContainer" containerID="9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce" Nov 26 15:57:21 crc kubenswrapper[5010]: E1126 15:57:21.640227 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce\": container with ID starting with 9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce not found: ID does not exist" containerID="9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.640260 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce"} err="failed to get container status \"9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce\": rpc error: code = NotFound desc = could not find container \"9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce\": container with ID starting with 9397ad4d53d583122a58e56256ae62f06fb1f5db08ad6e3267a65842ba6f4dce not found: ID does not exist" Nov 26 15:57:21 crc kubenswrapper[5010]: I1126 15:57:21.909448 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" path="/var/lib/kubelet/pods/59b35b26-8912-41df-aff4-b4a4055c39f8/volumes" Nov 26 15:58:02 crc kubenswrapper[5010]: I1126 15:58:02.728380 5010 scope.go:117] "RemoveContainer" containerID="bcb613e02e21fe00cab73d805301517aca7702080d4b0be085e5d732ae8551ce" Nov 26 15:58:02 crc kubenswrapper[5010]: I1126 15:58:02.786456 5010 scope.go:117] "RemoveContainer" containerID="616f09e91a853fa7f48dc2772ca9ce83cdcd59f906f1c5dc972742151702135d" Nov 26 15:58:02 crc kubenswrapper[5010]: I1126 15:58:02.812315 5010 scope.go:117] "RemoveContainer" containerID="40773634bcf0dbfe66732e09a3774b69fe16ab2fa9cf86335c7a911522a33022" Nov 26 15:59:02 crc kubenswrapper[5010]: I1126 15:59:02.906877 5010 scope.go:117] "RemoveContainer" containerID="d3a46fe789b64f7f7e31ca73390ee26d342047117f10028a576f19fb76dfcedd" Nov 26 15:59:02 crc kubenswrapper[5010]: I1126 15:59:02.943802 5010 scope.go:117] "RemoveContainer" containerID="130cd73fcbfef4ee3a96c354f8416b71c7b8fee2a7b71d13849287e7634e311c" Nov 26 15:59:02 crc kubenswrapper[5010]: I1126 15:59:02.974072 5010 scope.go:117] "RemoveContainer" containerID="a6ad69ad2f275b095e988e7b73cf40bc7e5931361b0c0f62610c61efb5efc0da" Nov 26 15:59:03 crc kubenswrapper[5010]: I1126 15:59:03.000225 5010 scope.go:117] "RemoveContainer" containerID="6baafe4a5f8c80877726c698f3d543e68971f894d1282f36e20f9a48993d1572" Nov 26 15:59:03 crc kubenswrapper[5010]: I1126 15:59:03.019743 5010 scope.go:117] "RemoveContainer" containerID="dfeaf06140981180f1a1a2e0c95ed0d76c067b94bc7a96a4d1dbe2f41d09225e" Nov 26 15:59:03 crc kubenswrapper[5010]: I1126 15:59:03.040212 5010 scope.go:117] "RemoveContainer" containerID="91d30cd917e1e2e4008b14f3788854cb37b47aec0ecb90baaa45b3f45936f4c7" Nov 26 15:59:03 crc kubenswrapper[5010]: I1126 15:59:03.065322 5010 scope.go:117] "RemoveContainer" containerID="1df44582fa6209a465f2fb40008f0c5b3dc20f374d51a23ca41e9704deffffd1" Nov 26 15:59:11 crc kubenswrapper[5010]: I1126 15:59:11.422782 5010 
patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:59:11 crc kubenswrapper[5010]: I1126 15:59:11.423538 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.588364 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 15:59:18 crc kubenswrapper[5010]: E1126 15:59:18.589232 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="extract-utilities" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.589247 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="extract-utilities" Nov 26 15:59:18 crc kubenswrapper[5010]: E1126 15:59:18.589269 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="extract-content" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.589275 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="extract-content" Nov 26 15:59:18 crc kubenswrapper[5010]: E1126 15:59:18.589296 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="registry-server" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.589301 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="registry-server" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.589434 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="59b35b26-8912-41df-aff4-b4a4055c39f8" containerName="registry-server" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.589977 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.593133 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.593395 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.597361 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.749445 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1a383ee-aa17-4033-ae1c-202a2deef72d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.749530 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1a383ee-aa17-4033-ae1c-202a2deef72d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.851454 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1a383ee-aa17-4033-ae1c-202a2deef72d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.851668 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1a383ee-aa17-4033-ae1c-202a2deef72d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.851862 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1a383ee-aa17-4033-ae1c-202a2deef72d-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.878111 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1a383ee-aa17-4033-ae1c-202a2deef72d-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:18 crc kubenswrapper[5010]: I1126 15:59:18.922571 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:19 crc kubenswrapper[5010]: I1126 15:59:19.475592 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 15:59:19 crc kubenswrapper[5010]: I1126 15:59:19.532965 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1a383ee-aa17-4033-ae1c-202a2deef72d","Type":"ContainerStarted","Data":"0390637487c867441b07654ecfe20a94641ec183c33953e698a65459193105fb"} Nov 26 15:59:20 crc kubenswrapper[5010]: I1126 15:59:20.541899 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1a383ee-aa17-4033-ae1c-202a2deef72d","Type":"ContainerStarted","Data":"6ada7afaeb7135bec84723fb0718f918295569483fe290e78f681ed80cd91b61"} Nov 26 15:59:20 crc kubenswrapper[5010]: I1126 15:59:20.559311 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.55928816 podStartE2EDuration="2.55928816s" podCreationTimestamp="2025-11-26 15:59:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:59:20.555897216 +0000 UTC m=+1981.346614374" watchObservedRunningTime="2025-11-26 15:59:20.55928816 +0000 UTC m=+1981.350005308" Nov 26 15:59:21 crc kubenswrapper[5010]: I1126 15:59:21.550608 5010 generic.go:334] "Generic (PLEG): container finished" podID="f1a383ee-aa17-4033-ae1c-202a2deef72d" containerID="6ada7afaeb7135bec84723fb0718f918295569483fe290e78f681ed80cd91b61" exitCode=0 Nov 26 15:59:21 crc kubenswrapper[5010]: I1126 15:59:21.550664 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1a383ee-aa17-4033-ae1c-202a2deef72d","Type":"ContainerDied","Data":"6ada7afaeb7135bec84723fb0718f918295569483fe290e78f681ed80cd91b61"} Nov 26 15:59:22 crc kubenswrapper[5010]: I1126 15:59:22.945498 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.121194 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1a383ee-aa17-4033-ae1c-202a2deef72d-kubelet-dir\") pod \"f1a383ee-aa17-4033-ae1c-202a2deef72d\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.121291 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1a383ee-aa17-4033-ae1c-202a2deef72d-kube-api-access\") pod \"f1a383ee-aa17-4033-ae1c-202a2deef72d\" (UID: \"f1a383ee-aa17-4033-ae1c-202a2deef72d\") " Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.121329 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f1a383ee-aa17-4033-ae1c-202a2deef72d-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "f1a383ee-aa17-4033-ae1c-202a2deef72d" (UID: "f1a383ee-aa17-4033-ae1c-202a2deef72d"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.121584 5010 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/f1a383ee-aa17-4033-ae1c-202a2deef72d-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.129044 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1a383ee-aa17-4033-ae1c-202a2deef72d-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "f1a383ee-aa17-4033-ae1c-202a2deef72d" (UID: "f1a383ee-aa17-4033-ae1c-202a2deef72d"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.223558 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1a383ee-aa17-4033-ae1c-202a2deef72d-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.588554 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"f1a383ee-aa17-4033-ae1c-202a2deef72d","Type":"ContainerDied","Data":"0390637487c867441b07654ecfe20a94641ec183c33953e698a65459193105fb"} Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.588657 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0390637487c867441b07654ecfe20a94641ec183c33953e698a65459193105fb" Nov 26 15:59:23 crc kubenswrapper[5010]: I1126 15:59:23.588609 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.777063 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 15:59:25 crc kubenswrapper[5010]: E1126 15:59:25.778071 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1a383ee-aa17-4033-ae1c-202a2deef72d" containerName="pruner" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.778094 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1a383ee-aa17-4033-ae1c-202a2deef72d" containerName="pruner" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.778356 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1a383ee-aa17-4033-ae1c-202a2deef72d" containerName="pruner" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.789621 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.789839 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.794567 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.794876 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.970516 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-var-lock\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.971353 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8f08243-f216-4515-9f46-b08e1f443c95-kube-api-access\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:25 crc kubenswrapper[5010]: I1126 15:59:25.971449 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.073001 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-var-lock\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.073112 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8f08243-f216-4515-9f46-b08e1f443c95-kube-api-access\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.073192 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-var-lock\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.073204 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.073265 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-kubelet-dir\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.101488 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8f08243-f216-4515-9f46-b08e1f443c95-kube-api-access\") pod \"installer-9-crc\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.126899 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.592095 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 15:59:26 crc kubenswrapper[5010]: W1126 15:59:26.603193 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podd8f08243_f216_4515_9f46_b08e1f443c95.slice/crio-32841bbf3b53cfc8b2f93b90e7025a39bb59c254c8f6594c541a32cc73fa3fb8 WatchSource:0}: Error finding container 32841bbf3b53cfc8b2f93b90e7025a39bb59c254c8f6594c541a32cc73fa3fb8: Status 404 returned error can't find the container with id 32841bbf3b53cfc8b2f93b90e7025a39bb59c254c8f6594c541a32cc73fa3fb8 Nov 26 15:59:26 crc kubenswrapper[5010]: I1126 15:59:26.622870 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d8f08243-f216-4515-9f46-b08e1f443c95","Type":"ContainerStarted","Data":"32841bbf3b53cfc8b2f93b90e7025a39bb59c254c8f6594c541a32cc73fa3fb8"} Nov 26 15:59:27 crc kubenswrapper[5010]: I1126 15:59:27.638963 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d8f08243-f216-4515-9f46-b08e1f443c95","Type":"ContainerStarted","Data":"fd30dd7add349ccdb1b99f6bcf30008a230b6959008d014e41c61ea1cac500b0"} Nov 26 15:59:27 crc kubenswrapper[5010]: I1126 15:59:27.670327 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.670300755 podStartE2EDuration="2.670300755s" podCreationTimestamp="2025-11-26 15:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 15:59:27.667470684 +0000 UTC m=+1988.458187872" watchObservedRunningTime="2025-11-26 15:59:27.670300755 +0000 UTC m=+1988.461017943" Nov 26 15:59:41 crc kubenswrapper[5010]: I1126 15:59:41.422816 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 15:59:41 crc kubenswrapper[5010]: I1126 15:59:41.423923 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.164277 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9"] Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.166070 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.169683 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.169778 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.191000 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9"] Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.240689 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-config-volume\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.240767 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-secret-volume\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.240799 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x246z\" (UniqueName: \"kubernetes.io/projected/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-kube-api-access-x246z\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.341955 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-config-volume\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.342040 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-secret-volume\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.342088 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x246z\" (UniqueName: \"kubernetes.io/projected/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-kube-api-access-x246z\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.343205 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-config-volume\") pod 
\"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.349309 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-secret-volume\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.364962 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x246z\" (UniqueName: \"kubernetes.io/projected/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-kube-api-access-x246z\") pod \"collect-profiles-29402880-phdh9\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.502986 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.910178 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9"] Nov 26 16:00:00 crc kubenswrapper[5010]: I1126 16:00:00.970350 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" event={"ID":"f410c598-4b4b-4f3b-b5cb-772ff71b0a80","Type":"ContainerStarted","Data":"fc47a0d8ba0e03caa253c71680b1824f99859538d7277a11365ff8faf09c47b9"} Nov 26 16:00:01 crc kubenswrapper[5010]: I1126 16:00:01.981051 5010 generic.go:334] "Generic (PLEG): container finished" podID="f410c598-4b4b-4f3b-b5cb-772ff71b0a80" containerID="205a4dfc01f88c6e24ba775b4a943f30d4dd93428b79c6c7bed725e7d8543f80" exitCode=0 Nov 26 16:00:01 crc kubenswrapper[5010]: I1126 16:00:01.981139 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" event={"ID":"f410c598-4b4b-4f3b-b5cb-772ff71b0a80","Type":"ContainerDied","Data":"205a4dfc01f88c6e24ba775b4a943f30d4dd93428b79c6c7bed725e7d8543f80"} Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.383255 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.387301 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-secret-volume\") pod \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.387375 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-config-volume\") pod \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.387402 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x246z\" (UniqueName: \"kubernetes.io/projected/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-kube-api-access-x246z\") pod \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\" (UID: \"f410c598-4b4b-4f3b-b5cb-772ff71b0a80\") " Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.388739 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-config-volume" (OuterVolumeSpecName: "config-volume") pod "f410c598-4b4b-4f3b-b5cb-772ff71b0a80" (UID: "f410c598-4b4b-4f3b-b5cb-772ff71b0a80"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.395178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-kube-api-access-x246z" (OuterVolumeSpecName: "kube-api-access-x246z") pod "f410c598-4b4b-4f3b-b5cb-772ff71b0a80" (UID: "f410c598-4b4b-4f3b-b5cb-772ff71b0a80"). InnerVolumeSpecName "kube-api-access-x246z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.395242 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f410c598-4b4b-4f3b-b5cb-772ff71b0a80" (UID: "f410c598-4b4b-4f3b-b5cb-772ff71b0a80"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.489014 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.489087 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:03 crc kubenswrapper[5010]: I1126 16:00:03.489117 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x246z\" (UniqueName: \"kubernetes.io/projected/f410c598-4b4b-4f3b-b5cb-772ff71b0a80-kube-api-access-x246z\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.008656 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" event={"ID":"f410c598-4b4b-4f3b-b5cb-772ff71b0a80","Type":"ContainerDied","Data":"fc47a0d8ba0e03caa253c71680b1824f99859538d7277a11365ff8faf09c47b9"} Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.008762 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc47a0d8ba0e03caa253c71680b1824f99859538d7277a11365ff8faf09c47b9" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.008783 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.476800 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6"] Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.481684 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402835-m4cc6"] Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.922191 5010 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.922557 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f410c598-4b4b-4f3b-b5cb-772ff71b0a80" containerName="collect-profiles" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.922574 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f410c598-4b4b-4f3b-b5cb-772ff71b0a80" containerName="collect-profiles" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.922784 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f410c598-4b4b-4f3b-b5cb-772ff71b0a80" containerName="collect-profiles" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.923279 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.923970 5010 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.924689 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f" gracePeriod=15 Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.924980 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429" gracePeriod=15 Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925002 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f" gracePeriod=15 Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.924977 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc" gracePeriod=15 Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.924732 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611" gracePeriod=15 Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925482 5010 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.925770 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925779 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.925787 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925793 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.925806 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925812 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.925819 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925824 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.925835 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925842 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.925850 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925856 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.925990 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.926004 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.926014 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.926025 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.926032 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.926038 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 16:00:04 crc kubenswrapper[5010]: E1126 16:00:04.926200 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 16:00:04 crc kubenswrapper[5010]: I1126 16:00:04.926546 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114114 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114152 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114194 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114405 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114466 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114529 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.114583 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.115171 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216526 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216572 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" 
(UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216630 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216651 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216658 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216735 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216758 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216790 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216779 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216686 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216855 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216936 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.216977 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.217056 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:05 crc kubenswrapper[5010]: I1126 16:00:05.906447 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="446fb8a2-da33-4281-a0bf-98d3450a22e7" path="/var/lib/kubelet/pods/446fb8a2-da33-4281-a0bf-98d3450a22e7/volumes" Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.030235 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.032892 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.034110 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429" exitCode=0 Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.034159 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611" exitCode=0 Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.034183 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc" exitCode=0 Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.034202 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f" exitCode=2 Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.034253 5010 scope.go:117] "RemoveContainer" 
containerID="f730fa010e25bf2660569d5a80a63a1a13b458cfe11f4461dada1c8af80c8a3b" Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.038962 5010 generic.go:334] "Generic (PLEG): container finished" podID="d8f08243-f216-4515-9f46-b08e1f443c95" containerID="fd30dd7add349ccdb1b99f6bcf30008a230b6959008d014e41c61ea1cac500b0" exitCode=0 Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.039093 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d8f08243-f216-4515-9f46-b08e1f443c95","Type":"ContainerDied","Data":"fd30dd7add349ccdb1b99f6bcf30008a230b6959008d014e41c61ea1cac500b0"} Nov 26 16:00:06 crc kubenswrapper[5010]: I1126 16:00:06.040526 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.132418 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.465790 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.467239 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.565291 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-kubelet-dir\") pod \"d8f08243-f216-4515-9f46-b08e1f443c95\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.565386 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8f08243-f216-4515-9f46-b08e1f443c95-kube-api-access\") pod \"d8f08243-f216-4515-9f46-b08e1f443c95\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.565417 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d8f08243-f216-4515-9f46-b08e1f443c95" (UID: "d8f08243-f216-4515-9f46-b08e1f443c95"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.565440 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-var-lock\") pod \"d8f08243-f216-4515-9f46-b08e1f443c95\" (UID: \"d8f08243-f216-4515-9f46-b08e1f443c95\") " Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.565483 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-var-lock" (OuterVolumeSpecName: "var-lock") pod "d8f08243-f216-4515-9f46-b08e1f443c95" (UID: "d8f08243-f216-4515-9f46-b08e1f443c95"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.566032 5010 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.566053 5010 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d8f08243-f216-4515-9f46-b08e1f443c95-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.572056 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8f08243-f216-4515-9f46-b08e1f443c95-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d8f08243-f216-4515-9f46-b08e1f443c95" (UID: "d8f08243-f216-4515-9f46-b08e1f443c95"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.628784 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.629900 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.630730 5010 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.631203 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.667156 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d8f08243-f216-4515-9f46-b08e1f443c95-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.768880 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769205 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769320 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769374 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769444 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769567 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769938 5010 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769968 5010 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.769986 5010 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 16:00:07 crc kubenswrapper[5010]: I1126 16:00:07.914956 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.162103 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.164769 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f" exitCode=0 Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.164873 5010 scope.go:117] "RemoveContainer" containerID="b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.165171 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.167583 5010 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.169103 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.171587 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"d8f08243-f216-4515-9f46-b08e1f443c95","Type":"ContainerDied","Data":"32841bbf3b53cfc8b2f93b90e7025a39bb59c254c8f6594c541a32cc73fa3fb8"} Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.171667 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="32841bbf3b53cfc8b2f93b90e7025a39bb59c254c8f6594c541a32cc73fa3fb8" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.171803 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.172380 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.173025 5010 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.177270 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.177824 5010 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.214520 5010 scope.go:117] "RemoveContainer" containerID="2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.257166 5010 scope.go:117] "RemoveContainer" containerID="b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.285540 5010 scope.go:117] "RemoveContainer" containerID="badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.322136 5010 scope.go:117] "RemoveContainer" containerID="212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.367047 5010 scope.go:117] "RemoveContainer" containerID="1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.399836 5010 scope.go:117] "RemoveContainer" containerID="b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429" Nov 26 16:00:08 crc kubenswrapper[5010]: E1126 16:00:08.401258 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\": container with ID starting with b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429 not found: ID does not exist" containerID="b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.401308 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429"} err="failed to get container status \"b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\": rpc error: code = NotFound desc = could not find container 
\"b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429\": container with ID starting with b6452ba3f2d3bcd4431fcbdd0a685e727ebbd40ce8a1998b5eb367798a5ae429 not found: ID does not exist" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.401341 5010 scope.go:117] "RemoveContainer" containerID="2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611" Nov 26 16:00:08 crc kubenswrapper[5010]: E1126 16:00:08.401900 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\": container with ID starting with 2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611 not found: ID does not exist" containerID="2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.401945 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611"} err="failed to get container status \"2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\": rpc error: code = NotFound desc = could not find container \"2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611\": container with ID starting with 2154eced62e2a13adf545e99d58309e81978c6b4b1938a5817fd4ae5bb7e4611 not found: ID does not exist" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.401975 5010 scope.go:117] "RemoveContainer" containerID="b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc" Nov 26 16:00:08 crc kubenswrapper[5010]: E1126 16:00:08.402264 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\": container with ID starting with b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc not found: ID does not exist" containerID="b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.402315 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc"} err="failed to get container status \"b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\": rpc error: code = NotFound desc = could not find container \"b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc\": container with ID starting with b931abb2fb07c1685d8e4a4883d233994d738da7c45cadd551087bf0d5ac3bcc not found: ID does not exist" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.402350 5010 scope.go:117] "RemoveContainer" containerID="badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f" Nov 26 16:00:08 crc kubenswrapper[5010]: E1126 16:00:08.402940 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\": container with ID starting with badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f not found: ID does not exist" containerID="badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.402986 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f"} 
err="failed to get container status \"badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\": rpc error: code = NotFound desc = could not find container \"badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f\": container with ID starting with badbadc82a6cf25edda2132a2d325a7e2ecdd0df888893e751bb774bd191f49f not found: ID does not exist" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.403025 5010 scope.go:117] "RemoveContainer" containerID="212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f" Nov 26 16:00:08 crc kubenswrapper[5010]: E1126 16:00:08.403478 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\": container with ID starting with 212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f not found: ID does not exist" containerID="212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.403569 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f"} err="failed to get container status \"212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\": rpc error: code = NotFound desc = could not find container \"212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f\": container with ID starting with 212c441ce393c69e2f2314fc729a425d044163f78610057ca946c030f5bfc79f not found: ID does not exist" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.403653 5010 scope.go:117] "RemoveContainer" containerID="1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8" Nov 26 16:00:08 crc kubenswrapper[5010]: E1126 16:00:08.404245 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\": container with ID starting with 1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8 not found: ID does not exist" containerID="1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8" Nov 26 16:00:08 crc kubenswrapper[5010]: I1126 16:00:08.404281 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8"} err="failed to get container status \"1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\": rpc error: code = NotFound desc = could not find container \"1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8\": container with ID starting with 1e1e65ab14230fa9fe6ae1ea97b6871bf841ef10a98773231e6b7415508177b8 not found: ID does not exist" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.682578 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T16:00:09Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T16:00:09Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T16:00:09Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T16:00:09Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"}]}}\" for node \"crc\": Patch \"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.683494 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.684164 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.684582 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.685155 5010 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.685193 5010 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 16:00:09 crc kubenswrapper[5010]: I1126 16:00:09.900579 5010 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: I1126 16:00:09.900920 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:09 crc kubenswrapper[5010]: E1126 16:00:09.971961 5010 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.154:6443: connect: connection refused" 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:09 crc kubenswrapper[5010]: I1126 16:00:09.972731 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:10 crc kubenswrapper[5010]: E1126 16:00:10.017682 5010 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.154:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b99d2df799126 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 16:00:10.017149222 +0000 UTC m=+2030.807866370,LastTimestamp:2025-11-26 16:00:10.017149222 +0000 UTC m=+2030.807866370,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 16:00:10 crc kubenswrapper[5010]: I1126 16:00:10.191344 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"88878554ee6770b9860f4f66f40efe5ac479d1f0103623cc99648858b8662bc0"} Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.205006 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a"} Nov 26 16:00:11 crc kubenswrapper[5010]: E1126 16:00:11.205550 5010 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.154:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.206080 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.423049 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.423111 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.423158 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.424056 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"238c759f629d5116acbbb07eb94cd9109f06028f6b7ad27094247fc4ae32e555"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:00:11 crc kubenswrapper[5010]: I1126 16:00:11.424156 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://238c759f629d5116acbbb07eb94cd9109f06028f6b7ad27094247fc4ae32e555" gracePeriod=600 Nov 26 16:00:12 crc kubenswrapper[5010]: E1126 16:00:12.126030 5010 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.154:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187b99d2df799126 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 16:00:10.017149222 +0000 UTC m=+2030.807866370,LastTimestamp:2025-11-26 16:00:10.017149222 +0000 UTC m=+2030.807866370,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 16:00:12 crc kubenswrapper[5010]: I1126 16:00:12.220763 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="238c759f629d5116acbbb07eb94cd9109f06028f6b7ad27094247fc4ae32e555" exitCode=0 Nov 26 16:00:12 crc kubenswrapper[5010]: I1126 16:00:12.220785 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"238c759f629d5116acbbb07eb94cd9109f06028f6b7ad27094247fc4ae32e555"} Nov 26 16:00:12 crc kubenswrapper[5010]: I1126 16:00:12.220946 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f"} Nov 26 16:00:12 crc kubenswrapper[5010]: I1126 16:00:12.220974 5010 scope.go:117] "RemoveContainer" containerID="2867dab3b5a6395301b02dc8ac032628f6bd880f228f1aa8f97c56a5c1f7d6f5" Nov 26 16:00:12 crc kubenswrapper[5010]: I1126 16:00:12.222129 5010 status_manager.go:851] "Failed to get 
status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:12 crc kubenswrapper[5010]: E1126 16:00:12.222417 5010 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.154:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:00:12 crc kubenswrapper[5010]: I1126 16:00:12.222827 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.014481 5010 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.015692 5010 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.016092 5010 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.016551 5010 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.017057 5010 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:14 crc kubenswrapper[5010]: I1126 16:00:14.017116 5010 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.017639 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="200ms" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.218857 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="400ms" Nov 26 16:00:14 crc kubenswrapper[5010]: E1126 16:00:14.619424 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="800ms" Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.115094 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.83:8081/readyz\": dial tcp 10.217.0.83:8081: connect: connection refused" Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.258230 5010 generic.go:334] "Generic (PLEG): container finished" podID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" containerID="123aa5532e736c7d79300e4d0a083c38cf139288714201d7f45153f59a0a9a2d" exitCode=1 Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.258364 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerDied","Data":"123aa5532e736c7d79300e4d0a083c38cf139288714201d7f45153f59a0a9a2d"} Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.259195 5010 scope.go:117] "RemoveContainer" containerID="123aa5532e736c7d79300e4d0a083c38cf139288714201d7f45153f59a0a9a2d" Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.259638 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.260908 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:15 crc kubenswrapper[5010]: I1126 16:00:15.261508 5010 status_manager.go:851] "Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:15 crc kubenswrapper[5010]: E1126 16:00:15.420767 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="1.6s" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.278972 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4799b0e-11ed-4331-84d1-daf581d00bbe" containerID="c8ca7ec6ec2d89eb4768de0b045c04739ae5566a9cfcb185559fb696a96e70c5" exitCode=1 Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.279080 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerDied","Data":"c8ca7ec6ec2d89eb4768de0b045c04739ae5566a9cfcb185559fb696a96e70c5"} 
Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.280755 5010 scope.go:117] "RemoveContainer" containerID="c8ca7ec6ec2d89eb4768de0b045c04739ae5566a9cfcb185559fb696a96e70c5" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.281239 5010 status_manager.go:851] "Failed to get status for pod" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-fx8tr\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.281783 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.282548 5010 status_manager.go:851] "Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.283161 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.287158 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerDied","Data":"2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120"} Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.287246 5010 scope.go:117] "RemoveContainer" containerID="123aa5532e736c7d79300e4d0a083c38cf139288714201d7f45153f59a0a9a2d" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.287154 5010 generic.go:334] "Generic (PLEG): container finished" podID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" containerID="2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120" exitCode=1 Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.287856 5010 scope.go:117] "RemoveContainer" containerID="2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120" Nov 26 16:00:16 crc kubenswrapper[5010]: E1126 16:00:16.288600 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.288765 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.289761 5010 status_manager.go:851] "Failed to get status for pod" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-fx8tr\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.290775 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.291495 5010 status_manager.go:851] "Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.891225 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.893089 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.893257 5010 status_manager.go:851] "Failed to get status for pod" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-fx8tr\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.894900 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.895578 5010 status_manager.go:851] "Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.912206 5010 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.912230 5010 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:16 crc kubenswrapper[5010]: E1126 16:00:16.912757 5010 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:16 crc kubenswrapper[5010]: I1126 16:00:16.913256 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:17 crc kubenswrapper[5010]: E1126 16:00:17.021697 5010 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.154:6443: connect: connection refused" interval="3.2s" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.296897 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4799b0e-11ed-4331-84d1-daf581d00bbe" containerID="39d3b7ef31a36555c43a80e0c9fbb3aa8535dd346b1a27390ad9900f22a5cc62" exitCode=1 Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.296967 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerDied","Data":"39d3b7ef31a36555c43a80e0c9fbb3aa8535dd346b1a27390ad9900f22a5cc62"} Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.297014 5010 scope.go:117] "RemoveContainer" containerID="c8ca7ec6ec2d89eb4768de0b045c04739ae5566a9cfcb185559fb696a96e70c5" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.297676 5010 status_manager.go:851] "Failed to get status for pod" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-fx8tr\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.297916 5010 scope.go:117] "RemoveContainer" containerID="39d3b7ef31a36555c43a80e0c9fbb3aa8535dd346b1a27390ad9900f22a5cc62" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.298004 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: E1126 16:00:17.298372 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.298445 5010 status_manager.go:851] 
"Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.298697 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.302932 5010 generic.go:334] "Generic (PLEG): container finished" podID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" containerID="5c955e1edf2b98e60d973c5658642dd0bf11329543a184e4299123ea0328968d" exitCode=1 Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.302987 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" event={"ID":"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6","Type":"ContainerDied","Data":"5c955e1edf2b98e60d973c5658642dd0bf11329543a184e4299123ea0328968d"} Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.303596 5010 scope.go:117] "RemoveContainer" containerID="5c955e1edf2b98e60d973c5658642dd0bf11329543a184e4299123ea0328968d" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.304145 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.304651 5010 status_manager.go:851] "Failed to get status for pod" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-7757b8b846-drzn5\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.305123 5010 status_manager.go:851] "Failed to get status for pod" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-fx8tr\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.305399 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.305643 5010 status_manager.go:851] "Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.306141 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2ff59eacacea8174ca2a348f1175bd1188d31c27e23bb694877a52a54a320fbf"} Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.306185 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a7f630df3caa895e1ae9e295a3ab630b17c8d85400c3a2e4a8284131a7514e92"} Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.306545 5010 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.306575 5010 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.306831 5010 status_manager.go:851] "Failed to get status for pod" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: E1126 16:00:17.307028 5010 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.154:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.307130 5010 status_manager.go:851] "Failed to get status for pod" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/metallb-system/pods/metallb-operator-controller-manager-7757b8b846-drzn5\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.307434 5010 status_manager.go:851] "Failed to get status for pod" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/octavia-operator-controller-manager-64cdc6ff96-fx8tr\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.307787 5010 status_manager.go:851] "Failed to get status for pod" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openstack-operators/pods/ironic-operator-controller-manager-67cb4dc6d4-dhngn\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:17 crc kubenswrapper[5010]: I1126 16:00:17.308097 5010 status_manager.go:851] "Failed to get status for pod" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-kt7rg\": dial tcp 38.102.83.154:6443: connect: connection refused" Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.319324 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.319774 5010 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c" exitCode=1 Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.319837 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c"} Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.320433 5010 scope.go:117] "RemoveContainer" containerID="26ea747cdc0f1966c239ecf0eea731ba54bbd8ebf3f09ea6800a8ba7b7b5bc5c" Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.326639 5010 generic.go:334] "Generic (PLEG): container finished" podID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" containerID="222a4e61999cc7b02b84c863e925834c7eafc4ef1cc9e1e9f11901e1abc296d6" exitCode=1 Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.326735 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" event={"ID":"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6","Type":"ContainerDied","Data":"222a4e61999cc7b02b84c863e925834c7eafc4ef1cc9e1e9f11901e1abc296d6"} Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.326776 5010 scope.go:117] "RemoveContainer" containerID="5c955e1edf2b98e60d973c5658642dd0bf11329543a184e4299123ea0328968d" Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.327222 5010 scope.go:117] "RemoveContainer" containerID="222a4e61999cc7b02b84c863e925834c7eafc4ef1cc9e1e9f11901e1abc296d6" Nov 26 16:00:18 crc kubenswrapper[5010]: E1126 16:00:18.327485 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-7757b8b846-drzn5_metallb-system(afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6)\"" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.333558 5010 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="2ff59eacacea8174ca2a348f1175bd1188d31c27e23bb694877a52a54a320fbf" exitCode=0 Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.333635 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"2ff59eacacea8174ca2a348f1175bd1188d31c27e23bb694877a52a54a320fbf"} Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.333683 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"97d0bf7573b08533e82534b51f42e32a3202cb53b480e0a88ae38cd73bca484e"} Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 
16:00:18.333699 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b025a6216c92fdcff518126e5c6224a53b340cf746f041011fd773c62ba183e3"} Nov 26 16:00:18 crc kubenswrapper[5010]: I1126 16:00:18.333741 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"fbc2a483af37a076bbd3d6181031c84e23983ebec0ce64809b6b16da7fcb51d7"} Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.376136 5010 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.376526 5010 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.376546 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"bb49793b85e53c881d360b4f10ba49bdba7b01344a365dbf6d62eade5472a840"} Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.376614 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.376641 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1b21d8a99e3dc5932b27067ebb8f60e12115a8aab8cfaf202c6c030024d1329a"} Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.382271 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 16:00:19 crc kubenswrapper[5010]: I1126 16:00:19.382349 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"b325889014142cbb137f15b49c3f5ffd52c47cf6888232cbdd32b2fd290c1337"} Nov 26 16:00:20 crc kubenswrapper[5010]: I1126 16:00:20.718994 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 16:00:20 crc kubenswrapper[5010]: I1126 16:00:20.719875 5010 scope.go:117] "RemoveContainer" containerID="222a4e61999cc7b02b84c863e925834c7eafc4ef1cc9e1e9f11901e1abc296d6" Nov 26 16:00:20 crc kubenswrapper[5010]: E1126 16:00:20.720057 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=metallb-operator-controller-manager-7757b8b846-drzn5_metallb-system(afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6)\"" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" Nov 26 16:00:21 crc kubenswrapper[5010]: I1126 16:00:21.914318 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:21 crc kubenswrapper[5010]: I1126 16:00:21.914610 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:21 crc kubenswrapper[5010]: I1126 16:00:21.923687 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:24 crc kubenswrapper[5010]: I1126 16:00:24.487317 5010 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:24 crc kubenswrapper[5010]: I1126 16:00:24.667061 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 16:00:24 crc kubenswrapper[5010]: I1126 16:00:24.672573 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 16:00:24 crc kubenswrapper[5010]: I1126 16:00:24.677194 5010 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c388bb08-f65c-462f-8e01-ae7993392db2" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.114795 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.115332 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.115645 5010 scope.go:117] "RemoveContainer" containerID="2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120" Nov 26 16:00:25 crc kubenswrapper[5010]: E1126 16:00:25.116023 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.449127 5010 generic.go:334] "Generic (PLEG): container finished" podID="93625d2a-6f36-43a8-b26c-8f6506955b15" containerID="fe461b19e16d20c2455c84e24a435bc410ffb68112aac3b95ec29676f730a5a5" exitCode=1 Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.449885 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerDied","Data":"fe461b19e16d20c2455c84e24a435bc410ffb68112aac3b95ec29676f730a5a5"} Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.449915 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.450077 5010 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.450090 5010 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.450475 5010 scope.go:117] "RemoveContainer" 
containerID="2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120" Nov 26 16:00:25 crc kubenswrapper[5010]: E1126 16:00:25.450625 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.451046 5010 scope.go:117] "RemoveContainer" containerID="fe461b19e16d20c2455c84e24a435bc410ffb68112aac3b95ec29676f730a5a5" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.456466 5010 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c388bb08-f65c-462f-8e01-ae7993392db2" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.465976 5010 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://fbc2a483af37a076bbd3d6181031c84e23983ebec0ce64809b6b16da7fcb51d7" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.466023 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.677943 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.678219 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 16:00:25 crc kubenswrapper[5010]: I1126 16:00:25.678815 5010 scope.go:117] "RemoveContainer" containerID="39d3b7ef31a36555c43a80e0c9fbb3aa8535dd346b1a27390ad9900f22a5cc62" Nov 26 16:00:25 crc kubenswrapper[5010]: E1126 16:00:25.679094 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.461496 5010 generic.go:334] "Generic (PLEG): container finished" podID="b0d7107e-a617-4a7b-a6e3-0267996965ef" containerID="ff4e2ed76a4e549e0d660455bf751ed522150bb92788d3f57daebe0960cc8b8f" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.461550 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" event={"ID":"b0d7107e-a617-4a7b-a6e3-0267996965ef","Type":"ContainerDied","Data":"ff4e2ed76a4e549e0d660455bf751ed522150bb92788d3f57daebe0960cc8b8f"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.463065 5010 scope.go:117] "RemoveContainer" containerID="ff4e2ed76a4e549e0d660455bf751ed522150bb92788d3f57daebe0960cc8b8f" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.465588 5010 generic.go:334] "Generic (PLEG): container finished" podID="01236c17-da54-4428-9e82-9a3b0165d6fc" 
containerID="cae36229a20f3bd28ea7ae47bbaaa2ba414bf41fbcef640374ee743e2bf409c6" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.465621 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" event={"ID":"01236c17-da54-4428-9e82-9a3b0165d6fc","Type":"ContainerDied","Data":"cae36229a20f3bd28ea7ae47bbaaa2ba414bf41fbcef640374ee743e2bf409c6"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.466317 5010 scope.go:117] "RemoveContainer" containerID="cae36229a20f3bd28ea7ae47bbaaa2ba414bf41fbcef640374ee743e2bf409c6" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.471395 5010 generic.go:334] "Generic (PLEG): container finished" podID="93625d2a-6f36-43a8-b26c-8f6506955b15" containerID="f61948e73ca7c2ee2141ae7a3cfd919477923d37ac589e7f19414f5aa01192d4" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.471480 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerDied","Data":"f61948e73ca7c2ee2141ae7a3cfd919477923d37ac589e7f19414f5aa01192d4"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.471528 5010 scope.go:117] "RemoveContainer" containerID="fe461b19e16d20c2455c84e24a435bc410ffb68112aac3b95ec29676f730a5a5" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.475112 5010 generic.go:334] "Generic (PLEG): container finished" podID="ce1fedbc-31da-4c37-9731-34e79ab604f4" containerID="9453bb43265bec2f03c27e2b772be454e0779f6488bb7baf96ce9657dbfeea03" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.475210 5010 scope.go:117] "RemoveContainer" containerID="f61948e73ca7c2ee2141ae7a3cfd919477923d37ac589e7f19414f5aa01192d4" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.475231 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" event={"ID":"ce1fedbc-31da-4c37-9731-34e79ab604f4","Type":"ContainerDied","Data":"9453bb43265bec2f03c27e2b772be454e0779f6488bb7baf96ce9657dbfeea03"} Nov 26 16:00:26 crc kubenswrapper[5010]: E1126 16:00:26.475603 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-sxdct_openstack-operators(93625d2a-6f36-43a8-b26c-8f6506955b15)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" podUID="93625d2a-6f36-43a8-b26c-8f6506955b15" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.476189 5010 scope.go:117] "RemoveContainer" containerID="9453bb43265bec2f03c27e2b772be454e0779f6488bb7baf96ce9657dbfeea03" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.483317 5010 generic.go:334] "Generic (PLEG): container finished" podID="cdfa6310-b994-49ba-8e89-dc6584a65314" containerID="8606ed869ed200cba20c879dfa9b7f2f431c3538d3ed1f328dc53dba3bff0cdc" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.483387 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" event={"ID":"cdfa6310-b994-49ba-8e89-dc6584a65314","Type":"ContainerDied","Data":"8606ed869ed200cba20c879dfa9b7f2f431c3538d3ed1f328dc53dba3bff0cdc"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.483940 5010 scope.go:117] "RemoveContainer" 
containerID="8606ed869ed200cba20c879dfa9b7f2f431c3538d3ed1f328dc53dba3bff0cdc" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.486760 5010 generic.go:334] "Generic (PLEG): container finished" podID="dfb4a15b-a139-4778-acc7-f236e947ca96" containerID="8b37edfe16175ce928894b2392eb797fe30712ce76387cbaedf24af7351940a9" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.486820 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" event={"ID":"dfb4a15b-a139-4778-acc7-f236e947ca96","Type":"ContainerDied","Data":"8b37edfe16175ce928894b2392eb797fe30712ce76387cbaedf24af7351940a9"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.487165 5010 scope.go:117] "RemoveContainer" containerID="8b37edfe16175ce928894b2392eb797fe30712ce76387cbaedf24af7351940a9" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.489437 5010 generic.go:334] "Generic (PLEG): container finished" podID="191eef94-8fdf-4180-8ce0-1d62fc3f0de0" containerID="c40624e5036b61f37a3a5151cb7712ccdabb095aae47988a25b62eedc1cc46ad" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.489554 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" event={"ID":"191eef94-8fdf-4180-8ce0-1d62fc3f0de0","Type":"ContainerDied","Data":"c40624e5036b61f37a3a5151cb7712ccdabb095aae47988a25b62eedc1cc46ad"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.490170 5010 scope.go:117] "RemoveContainer" containerID="c40624e5036b61f37a3a5151cb7712ccdabb095aae47988a25b62eedc1cc46ad" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.492991 5010 generic.go:334] "Generic (PLEG): container finished" podID="bf155072-f786-47eb-9455-f807444d12e9" containerID="a3a2e91975b446dcf94c7de1d04c03b010215ecaae718f9c1f1253fb5381deac" exitCode=1 Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.493079 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" event={"ID":"bf155072-f786-47eb-9455-f807444d12e9","Type":"ContainerDied","Data":"a3a2e91975b446dcf94c7de1d04c03b010215ecaae718f9c1f1253fb5381deac"} Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.493226 5010 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.493237 5010 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="23c4a5fd-d711-43a1-95e5-db6d9016f440" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.493686 5010 scope.go:117] "RemoveContainer" containerID="a3a2e91975b446dcf94c7de1d04c03b010215ecaae718f9c1f1253fb5381deac" Nov 26 16:00:26 crc kubenswrapper[5010]: I1126 16:00:26.567163 5010 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="c388bb08-f65c-462f-8e01-ae7993392db2" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.504613 5010 generic.go:334] "Generic (PLEG): container finished" podID="b6c13a13-621b-45cb-9830-4dfaf15ee06b" containerID="786138c069c0810c3e4d6dc76e3af66319ca430ec0c277a722fbf73353c60a65" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.504667 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" event={"ID":"b6c13a13-621b-45cb-9830-4dfaf15ee06b","Type":"ContainerDied","Data":"786138c069c0810c3e4d6dc76e3af66319ca430ec0c277a722fbf73353c60a65"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.505644 5010 scope.go:117] "RemoveContainer" containerID="786138c069c0810c3e4d6dc76e3af66319ca430ec0c277a722fbf73353c60a65" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.509301 5010 generic.go:334] "Generic (PLEG): container finished" podID="6a970d68-d885-4fc2-9d58-508537a42572" containerID="b50f73582c007c8f24a9c03ddd236263dc25499703ad832f45245a094947f0c0" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.509408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" event={"ID":"6a970d68-d885-4fc2-9d58-508537a42572","Type":"ContainerDied","Data":"b50f73582c007c8f24a9c03ddd236263dc25499703ad832f45245a094947f0c0"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.512637 5010 scope.go:117] "RemoveContainer" containerID="b50f73582c007c8f24a9c03ddd236263dc25499703ad832f45245a094947f0c0" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.515150 5010 generic.go:334] "Generic (PLEG): container finished" podID="9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f" containerID="b1fd64c80ce43af3e354ed98cab94565fbbb3adb3f0d394ec74619425ffb7574" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.515276 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" event={"ID":"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f","Type":"ContainerDied","Data":"b1fd64c80ce43af3e354ed98cab94565fbbb3adb3f0d394ec74619425ffb7574"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.516461 5010 scope.go:117] "RemoveContainer" containerID="b1fd64c80ce43af3e354ed98cab94565fbbb3adb3f0d394ec74619425ffb7574" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.519916 5010 generic.go:334] "Generic (PLEG): container finished" podID="05194bfa-88c3-4826-8a59-6d62252e4b1a" containerID="b9db1037794704a0616860331855c58426ae1db11825a43d0dab8bc133a77603" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.520030 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" event={"ID":"05194bfa-88c3-4826-8a59-6d62252e4b1a","Type":"ContainerDied","Data":"b9db1037794704a0616860331855c58426ae1db11825a43d0dab8bc133a77603"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.521405 5010 scope.go:117] "RemoveContainer" containerID="b9db1037794704a0616860331855c58426ae1db11825a43d0dab8bc133a77603" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.526254 5010 generic.go:334] "Generic (PLEG): container finished" podID="cdfa6310-b994-49ba-8e89-dc6584a65314" containerID="f76ff90a964229428c475ae5e7da912de0d886e368d0cbc3ab3611da6efa7414" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.526362 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" event={"ID":"cdfa6310-b994-49ba-8e89-dc6584a65314","Type":"ContainerDied","Data":"f76ff90a964229428c475ae5e7da912de0d886e368d0cbc3ab3611da6efa7414"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.526579 5010 scope.go:117] "RemoveContainer" containerID="8606ed869ed200cba20c879dfa9b7f2f431c3538d3ed1f328dc53dba3bff0cdc" Nov 26 16:00:27 crc 
kubenswrapper[5010]: I1126 16:00:27.530610 5010 scope.go:117] "RemoveContainer" containerID="f76ff90a964229428c475ae5e7da912de0d886e368d0cbc3ab3611da6efa7414" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.532504 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-h9gnm_openstack-operators(cdfa6310-b994-49ba-8e89-dc6584a65314)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" podUID="cdfa6310-b994-49ba-8e89-dc6584a65314" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.540151 5010 generic.go:334] "Generic (PLEG): container finished" podID="b0d7107e-a617-4a7b-a6e3-0267996965ef" containerID="b6f727875a57a9ce1407886795665b4028280d6cbab737da62795136e025f4a9" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.540311 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" event={"ID":"b0d7107e-a617-4a7b-a6e3-0267996965ef","Type":"ContainerDied","Data":"b6f727875a57a9ce1407886795665b4028280d6cbab737da62795136e025f4a9"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.545876 5010 generic.go:334] "Generic (PLEG): container finished" podID="bf155072-f786-47eb-9455-f807444d12e9" containerID="dd0777e2b6d5aa10388112cc1b1d6015a1ec88713ec41070bcf2ccc575e346cd" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.545963 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" event={"ID":"bf155072-f786-47eb-9455-f807444d12e9","Type":"ContainerDied","Data":"dd0777e2b6d5aa10388112cc1b1d6015a1ec88713ec41070bcf2ccc575e346cd"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.547966 5010 scope.go:117] "RemoveContainer" containerID="dd0777e2b6d5aa10388112cc1b1d6015a1ec88713ec41070bcf2ccc575e346cd" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.548506 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-zq8vc_openstack-operators(bf155072-f786-47eb-9455-f807444d12e9)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" podUID="bf155072-f786-47eb-9455-f807444d12e9" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.550934 5010 scope.go:117] "RemoveContainer" containerID="b6f727875a57a9ce1407886795665b4028280d6cbab737da62795136e025f4a9" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.551367 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-gcj9h_openstack-operators(b0d7107e-a617-4a7b-a6e3-0267996965ef)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" podUID="b0d7107e-a617-4a7b-a6e3-0267996965ef" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.556214 5010 generic.go:334] "Generic (PLEG): container finished" podID="a4bbf592-007c-4176-a6a3-0209b33b6048" containerID="935bf0f31303b4c8e4604b5384f55b475f2c052bd5e4a6ed50ddd61d0dffa779" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.556270 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" event={"ID":"a4bbf592-007c-4176-a6a3-0209b33b6048","Type":"ContainerDied","Data":"935bf0f31303b4c8e4604b5384f55b475f2c052bd5e4a6ed50ddd61d0dffa779"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.556744 5010 scope.go:117] "RemoveContainer" containerID="935bf0f31303b4c8e4604b5384f55b475f2c052bd5e4a6ed50ddd61d0dffa779" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.558352 5010 generic.go:334] "Generic (PLEG): container finished" podID="ce1fedbc-31da-4c37-9731-34e79ab604f4" containerID="bc67c8b3b49e1688ee59470962dc5a6ba475642655cdef41ba5ba20dc42161a0" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.558371 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" event={"ID":"ce1fedbc-31da-4c37-9731-34e79ab604f4","Type":"ContainerDied","Data":"bc67c8b3b49e1688ee59470962dc5a6ba475642655cdef41ba5ba20dc42161a0"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.559993 5010 scope.go:117] "RemoveContainer" containerID="bc67c8b3b49e1688ee59470962dc5a6ba475642655cdef41ba5ba20dc42161a0" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.560220 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-9lx7h_openstack-operators(ce1fedbc-31da-4c37-9731-34e79ab604f4)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" podUID="ce1fedbc-31da-4c37-9731-34e79ab604f4" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.560754 5010 generic.go:334] "Generic (PLEG): container finished" podID="dfb4a15b-a139-4778-acc7-f236e947ca96" containerID="4997370ea983f7691e6654e87ab4cfe917cb8acce21e81c874d286ce490050b1" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.560829 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" event={"ID":"dfb4a15b-a139-4778-acc7-f236e947ca96","Type":"ContainerDied","Data":"4997370ea983f7691e6654e87ab4cfe917cb8acce21e81c874d286ce490050b1"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.561171 5010 scope.go:117] "RemoveContainer" containerID="4997370ea983f7691e6654e87ab4cfe917cb8acce21e81c874d286ce490050b1" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.561391 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-sj6tg_openstack-operators(dfb4a15b-a139-4778-acc7-f236e947ca96)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" podUID="dfb4a15b-a139-4778-acc7-f236e947ca96" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.567916 5010 generic.go:334] "Generic (PLEG): container finished" podID="191eef94-8fdf-4180-8ce0-1d62fc3f0de0" containerID="7a0816c89341c5499119fcedc2fd8fe7e63f5208a8ec5450c940288af755e6d8" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.568028 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" event={"ID":"191eef94-8fdf-4180-8ce0-1d62fc3f0de0","Type":"ContainerDied","Data":"7a0816c89341c5499119fcedc2fd8fe7e63f5208a8ec5450c940288af755e6d8"} Nov 26 
16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.568739 5010 scope.go:117] "RemoveContainer" containerID="7a0816c89341c5499119fcedc2fd8fe7e63f5208a8ec5450c940288af755e6d8" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.569198 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-f64fd_openstack-operators(191eef94-8fdf-4180-8ce0-1d62fc3f0de0)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" podUID="191eef94-8fdf-4180-8ce0-1d62fc3f0de0" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.573737 5010 generic.go:334] "Generic (PLEG): container finished" podID="01236c17-da54-4428-9e82-9a3b0165d6fc" containerID="4983f8dd92fd096ad73e67faeb5f8a1f9463456f2fc377546693d958e28e3fa7" exitCode=1 Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.573736 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" event={"ID":"01236c17-da54-4428-9e82-9a3b0165d6fc","Type":"ContainerDied","Data":"4983f8dd92fd096ad73e67faeb5f8a1f9463456f2fc377546693d958e28e3fa7"} Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.574267 5010 scope.go:117] "RemoveContainer" containerID="4983f8dd92fd096ad73e67faeb5f8a1f9463456f2fc377546693d958e28e3fa7" Nov 26 16:00:27 crc kubenswrapper[5010]: E1126 16:00:27.574501 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-zrldc_openstack-operators(01236c17-da54-4428-9e82-9a3b0165d6fc)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" podUID="01236c17-da54-4428-9e82-9a3b0165d6fc" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.588533 5010 scope.go:117] "RemoveContainer" containerID="ff4e2ed76a4e549e0d660455bf751ed522150bb92788d3f57daebe0960cc8b8f" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.711425 5010 scope.go:117] "RemoveContainer" containerID="a3a2e91975b446dcf94c7de1d04c03b010215ecaae718f9c1f1253fb5381deac" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.738417 5010 scope.go:117] "RemoveContainer" containerID="9453bb43265bec2f03c27e2b772be454e0779f6488bb7baf96ce9657dbfeea03" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.776532 5010 scope.go:117] "RemoveContainer" containerID="8b37edfe16175ce928894b2392eb797fe30712ce76387cbaedf24af7351940a9" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.809681 5010 scope.go:117] "RemoveContainer" containerID="c40624e5036b61f37a3a5151cb7712ccdabb095aae47988a25b62eedc1cc46ad" Nov 26 16:00:27 crc kubenswrapper[5010]: I1126 16:00:27.836947 5010 scope.go:117] "RemoveContainer" containerID="cae36229a20f3bd28ea7ae47bbaaa2ba414bf41fbcef640374ee743e2bf409c6" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.588006 5010 generic.go:334] "Generic (PLEG): container finished" podID="6a970d68-d885-4fc2-9d58-508537a42572" containerID="64a55030c6c894837eb6db6ae4c70d000febb4d75236220e04fa0ffb47ec39fe" exitCode=1 Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.588074 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" 
event={"ID":"6a970d68-d885-4fc2-9d58-508537a42572","Type":"ContainerDied","Data":"64a55030c6c894837eb6db6ae4c70d000febb4d75236220e04fa0ffb47ec39fe"} Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.588109 5010 scope.go:117] "RemoveContainer" containerID="b50f73582c007c8f24a9c03ddd236263dc25499703ad832f45245a094947f0c0" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.588470 5010 scope.go:117] "RemoveContainer" containerID="64a55030c6c894837eb6db6ae4c70d000febb4d75236220e04fa0ffb47ec39fe" Nov 26 16:00:28 crc kubenswrapper[5010]: E1126 16:00:28.588845 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-sbppr_openstack-operators(6a970d68-d885-4fc2-9d58-508537a42572)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" podUID="6a970d68-d885-4fc2-9d58-508537a42572" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.613801 5010 generic.go:334] "Generic (PLEG): container finished" podID="9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f" containerID="a7f59f9e901bf9d7be4b80c40c265cd7c0ed24bc9a7ce075477dec43154728cc" exitCode=1 Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.613929 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" event={"ID":"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f","Type":"ContainerDied","Data":"a7f59f9e901bf9d7be4b80c40c265cd7c0ed24bc9a7ce075477dec43154728cc"} Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.614640 5010 scope.go:117] "RemoveContainer" containerID="a7f59f9e901bf9d7be4b80c40c265cd7c0ed24bc9a7ce075477dec43154728cc" Nov 26 16:00:28 crc kubenswrapper[5010]: E1126 16:00:28.615114 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-c89k7_openstack-operators(9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" podUID="9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.619643 5010 generic.go:334] "Generic (PLEG): container finished" podID="a4bbf592-007c-4176-a6a3-0209b33b6048" containerID="0905832a42ac6fe820fcace1b0f0fc38e2399f94f1dbfbf25c2e3609d07d5341" exitCode=1 Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.619776 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" event={"ID":"a4bbf592-007c-4176-a6a3-0209b33b6048","Type":"ContainerDied","Data":"0905832a42ac6fe820fcace1b0f0fc38e2399f94f1dbfbf25c2e3609d07d5341"} Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.621855 5010 scope.go:117] "RemoveContainer" containerID="0905832a42ac6fe820fcace1b0f0fc38e2399f94f1dbfbf25c2e3609d07d5341" Nov 26 16:00:28 crc kubenswrapper[5010]: E1126 16:00:28.623098 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-qmr28_openstack-operators(a4bbf592-007c-4176-a6a3-0209b33b6048)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" podUID="a4bbf592-007c-4176-a6a3-0209b33b6048" Nov 26 16:00:28 crc 
kubenswrapper[5010]: I1126 16:00:28.637831 5010 generic.go:334] "Generic (PLEG): container finished" podID="05194bfa-88c3-4826-8a59-6d62252e4b1a" containerID="2817a934c40d92ea12cae6c0c9659d75b36229b260a71786e952932afbc02626" exitCode=1 Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.637926 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" event={"ID":"05194bfa-88c3-4826-8a59-6d62252e4b1a","Type":"ContainerDied","Data":"2817a934c40d92ea12cae6c0c9659d75b36229b260a71786e952932afbc02626"} Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.638859 5010 scope.go:117] "RemoveContainer" containerID="2817a934c40d92ea12cae6c0c9659d75b36229b260a71786e952932afbc02626" Nov 26 16:00:28 crc kubenswrapper[5010]: E1126 16:00:28.639353 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5llrj_openstack-operators(05194bfa-88c3-4826-8a59-6d62252e4b1a)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" podUID="05194bfa-88c3-4826-8a59-6d62252e4b1a" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.641634 5010 generic.go:334] "Generic (PLEG): container finished" podID="a3bc645d-4358-47cb-9e3b-ebc975c69092" containerID="ba1e6a30e347fd8cad385f5d5d6a8d57bc5748d5f8906078aeff12ba6567ef19" exitCode=1 Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.641692 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" event={"ID":"a3bc645d-4358-47cb-9e3b-ebc975c69092","Type":"ContainerDied","Data":"ba1e6a30e347fd8cad385f5d5d6a8d57bc5748d5f8906078aeff12ba6567ef19"} Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.642292 5010 scope.go:117] "RemoveContainer" containerID="ba1e6a30e347fd8cad385f5d5d6a8d57bc5748d5f8906078aeff12ba6567ef19" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.649036 5010 generic.go:334] "Generic (PLEG): container finished" podID="b6c13a13-621b-45cb-9830-4dfaf15ee06b" containerID="388d1fb00f0f35064b72900d498b7f764cfa82db851ce02fa5e1a37b2192fdbe" exitCode=1 Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.649071 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" event={"ID":"b6c13a13-621b-45cb-9830-4dfaf15ee06b","Type":"ContainerDied","Data":"388d1fb00f0f35064b72900d498b7f764cfa82db851ce02fa5e1a37b2192fdbe"} Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.649544 5010 scope.go:117] "RemoveContainer" containerID="388d1fb00f0f35064b72900d498b7f764cfa82db851ce02fa5e1a37b2192fdbe" Nov 26 16:00:28 crc kubenswrapper[5010]: E1126 16:00:28.649839 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-4w8ql_openstack-operators(b6c13a13-621b-45cb-9830-4dfaf15ee06b)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" podUID="b6c13a13-621b-45cb-9830-4dfaf15ee06b" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.652641 5010 scope.go:117] "RemoveContainer" containerID="b1fd64c80ce43af3e354ed98cab94565fbbb3adb3f0d394ec74619425ffb7574" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.737698 5010 scope.go:117] 
"RemoveContainer" containerID="935bf0f31303b4c8e4604b5384f55b475f2c052bd5e4a6ed50ddd61d0dffa779" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.802268 5010 scope.go:117] "RemoveContainer" containerID="b9db1037794704a0616860331855c58426ae1db11825a43d0dab8bc133a77603" Nov 26 16:00:28 crc kubenswrapper[5010]: I1126 16:00:28.838046 5010 scope.go:117] "RemoveContainer" containerID="786138c069c0810c3e4d6dc76e3af66319ca430ec0c277a722fbf73353c60a65" Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.659013 5010 generic.go:334] "Generic (PLEG): container finished" podID="8b2b09a7-2b17-43da-ae0e-4448b96eed50" containerID="48ecdfd6ba5fa7d524fddb033a37e3916c9c2634cce289ddf1aaa3ae4e1f07b1" exitCode=1 Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.659087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" event={"ID":"8b2b09a7-2b17-43da-ae0e-4448b96eed50","Type":"ContainerDied","Data":"48ecdfd6ba5fa7d524fddb033a37e3916c9c2634cce289ddf1aaa3ae4e1f07b1"} Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.660060 5010 scope.go:117] "RemoveContainer" containerID="48ecdfd6ba5fa7d524fddb033a37e3916c9c2634cce289ddf1aaa3ae4e1f07b1" Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.668047 5010 generic.go:334] "Generic (PLEG): container finished" podID="3daf5f1d-5d15-4b93-ac0b-8209060a0557" containerID="426e6bef153a7bb9774ad4371f1c7a77967e38df0b192c5dc24524a981ff0b37" exitCode=1 Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.668100 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" event={"ID":"3daf5f1d-5d15-4b93-ac0b-8209060a0557","Type":"ContainerDied","Data":"426e6bef153a7bb9774ad4371f1c7a77967e38df0b192c5dc24524a981ff0b37"} Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.668416 5010 scope.go:117] "RemoveContainer" containerID="426e6bef153a7bb9774ad4371f1c7a77967e38df0b192c5dc24524a981ff0b37" Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.673722 5010 generic.go:334] "Generic (PLEG): container finished" podID="7e5769c2-7f83-41ff-9365-7f5792e8d81b" containerID="636e667023d9a8bcd40bba7de400f2d406d56a4aa3de8ff2936814e20e6075f4" exitCode=1 Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.673737 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" event={"ID":"7e5769c2-7f83-41ff-9365-7f5792e8d81b","Type":"ContainerDied","Data":"636e667023d9a8bcd40bba7de400f2d406d56a4aa3de8ff2936814e20e6075f4"} Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.674263 5010 scope.go:117] "RemoveContainer" containerID="636e667023d9a8bcd40bba7de400f2d406d56a4aa3de8ff2936814e20e6075f4" Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.677775 5010 generic.go:334] "Generic (PLEG): container finished" podID="522c2ed1-a470-4885-88fc-395ed7834b23" containerID="d15d81ff00570f28f177a49da2a1e47a131b7d179fe2a0690555be742299479f" exitCode=1 Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.677831 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" event={"ID":"522c2ed1-a470-4885-88fc-395ed7834b23","Type":"ContainerDied","Data":"d15d81ff00570f28f177a49da2a1e47a131b7d179fe2a0690555be742299479f"} Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.678200 5010 scope.go:117] "RemoveContainer" 
containerID="d15d81ff00570f28f177a49da2a1e47a131b7d179fe2a0690555be742299479f" Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.679583 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" event={"ID":"a3bc645d-4358-47cb-9e3b-ebc975c69092","Type":"ContainerStarted","Data":"d8ec4c70fc7d1064d801e4da22f13dd0ee91509a4523242b61d57ad7d1c82f0a"} Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.679986 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.682138 5010 generic.go:334] "Generic (PLEG): container finished" podID="1ff0a07f-935b-493a-a18a-a449232dc185" containerID="bd717705b7d6552c334ac05e9e52e2cf5bd8e97bc19a100a306812b3a42aa922" exitCode=1 Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.682263 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" event={"ID":"1ff0a07f-935b-493a-a18a-a449232dc185","Type":"ContainerDied","Data":"bd717705b7d6552c334ac05e9e52e2cf5bd8e97bc19a100a306812b3a42aa922"} Nov 26 16:00:29 crc kubenswrapper[5010]: I1126 16:00:29.683036 5010 scope.go:117] "RemoveContainer" containerID="bd717705b7d6552c334ac05e9e52e2cf5bd8e97bc19a100a306812b3a42aa922" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.699099 5010 generic.go:334] "Generic (PLEG): container finished" podID="7e5769c2-7f83-41ff-9365-7f5792e8d81b" containerID="45ec74abecd9f369222be85712f2915f55924a6e2ba6f2388b2063ecf35d38e9" exitCode=1 Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.699165 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" event={"ID":"7e5769c2-7f83-41ff-9365-7f5792e8d81b","Type":"ContainerDied","Data":"45ec74abecd9f369222be85712f2915f55924a6e2ba6f2388b2063ecf35d38e9"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.699692 5010 scope.go:117] "RemoveContainer" containerID="636e667023d9a8bcd40bba7de400f2d406d56a4aa3de8ff2936814e20e6075f4" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.700890 5010 scope.go:117] "RemoveContainer" containerID="45ec74abecd9f369222be85712f2915f55924a6e2ba6f2388b2063ecf35d38e9" Nov 26 16:00:30 crc kubenswrapper[5010]: E1126 16:00:30.701997 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-k7vx2_openstack-operators(7e5769c2-7f83-41ff-9365-7f5792e8d81b)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" podUID="7e5769c2-7f83-41ff-9365-7f5792e8d81b" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.719317 5010 generic.go:334] "Generic (PLEG): container finished" podID="522c2ed1-a470-4885-88fc-395ed7834b23" containerID="e1337930b34a6a7d3cca8018189124704819e71a5db6061af53d19fb6c5e9a46" exitCode=1 Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.719437 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" event={"ID":"522c2ed1-a470-4885-88fc-395ed7834b23","Type":"ContainerDied","Data":"e1337930b34a6a7d3cca8018189124704819e71a5db6061af53d19fb6c5e9a46"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.720174 5010 scope.go:117] 
"RemoveContainer" containerID="e1337930b34a6a7d3cca8018189124704819e71a5db6061af53d19fb6c5e9a46" Nov 26 16:00:30 crc kubenswrapper[5010]: E1126 16:00:30.720549 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-bdtsk_openstack-operators(522c2ed1-a470-4885-88fc-395ed7834b23)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" podUID="522c2ed1-a470-4885-88fc-395ed7834b23" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.728332 5010 generic.go:334] "Generic (PLEG): container finished" podID="8b2b09a7-2b17-43da-ae0e-4448b96eed50" containerID="4f6ba9ba8ac763723907ff2cf50cc02e313f97147a3a9e1fe0545a0de914c725" exitCode=1 Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.728455 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" event={"ID":"8b2b09a7-2b17-43da-ae0e-4448b96eed50","Type":"ContainerDied","Data":"4f6ba9ba8ac763723907ff2cf50cc02e313f97147a3a9e1fe0545a0de914c725"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.729367 5010 scope.go:117] "RemoveContainer" containerID="4f6ba9ba8ac763723907ff2cf50cc02e313f97147a3a9e1fe0545a0de914c725" Nov 26 16:00:30 crc kubenswrapper[5010]: E1126 16:00:30.729876 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-mc96z_openstack-operators(8b2b09a7-2b17-43da-ae0e-4448b96eed50)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" podUID="8b2b09a7-2b17-43da-ae0e-4448b96eed50" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.731667 5010 generic.go:334] "Generic (PLEG): container finished" podID="1b523418-d938-4ba7-8788-b93b382429e3" containerID="1e7f60f161902fc7ea8951bcf6c0ff8f1c782354efd64e3efaed636c46858e43" exitCode=1 Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.731792 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerDied","Data":"1e7f60f161902fc7ea8951bcf6c0ff8f1c782354efd64e3efaed636c46858e43"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.732862 5010 scope.go:117] "RemoveContainer" containerID="1e7f60f161902fc7ea8951bcf6c0ff8f1c782354efd64e3efaed636c46858e43" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.740497 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" event={"ID":"3daf5f1d-5d15-4b93-ac0b-8209060a0557","Type":"ContainerStarted","Data":"b1fb5fceb83ae310f4786b191cb389e0d52c51ca5c96f567e65b22ef95ab3aee"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.740783 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.746616 5010 generic.go:334] "Generic (PLEG): container finished" podID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" containerID="f9412429b8a67c0a190bbe69c6b86f7e42407ffc5cb422757ff73d8c857f60d2" exitCode=1 Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.746900 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" event={"ID":"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19","Type":"ContainerDied","Data":"f9412429b8a67c0a190bbe69c6b86f7e42407ffc5cb422757ff73d8c857f60d2"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.747787 5010 scope.go:117] "RemoveContainer" containerID="f9412429b8a67c0a190bbe69c6b86f7e42407ffc5cb422757ff73d8c857f60d2" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.750534 5010 generic.go:334] "Generic (PLEG): container finished" podID="82a45cae-9275-4f6a-8807-1ed1c97da89e" containerID="154aef2360f415a2ceded687e7fa0a7301fb8088de9bde3d7b7c6cf03b55c884" exitCode=1 Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.750814 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" event={"ID":"82a45cae-9275-4f6a-8807-1ed1c97da89e","Type":"ContainerDied","Data":"154aef2360f415a2ceded687e7fa0a7301fb8088de9bde3d7b7c6cf03b55c884"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.751601 5010 scope.go:117] "RemoveContainer" containerID="154aef2360f415a2ceded687e7fa0a7301fb8088de9bde3d7b7c6cf03b55c884" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.755547 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" event={"ID":"1ff0a07f-935b-493a-a18a-a449232dc185","Type":"ContainerStarted","Data":"b92e809e6a1be9b26d27605387918403a7d6a7423d917fd8cc9dcec268653ed1"} Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.755947 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.789007 5010 scope.go:117] "RemoveContainer" containerID="d15d81ff00570f28f177a49da2a1e47a131b7d179fe2a0690555be742299479f" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.865826 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.866590 5010 scope.go:117] "RemoveContainer" containerID="f61948e73ca7c2ee2141ae7a3cfd919477923d37ac589e7f19414f5aa01192d4" Nov 26 16:00:30 crc kubenswrapper[5010]: E1126 16:00:30.866911 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-sxdct_openstack-operators(93625d2a-6f36-43a8-b26c-8f6506955b15)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" podUID="93625d2a-6f36-43a8-b26c-8f6506955b15" Nov 26 16:00:30 crc kubenswrapper[5010]: I1126 16:00:30.899990 5010 scope.go:117] "RemoveContainer" containerID="48ecdfd6ba5fa7d524fddb033a37e3916c9c2634cce289ddf1aaa3ae4e1f07b1" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.272565 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.685133 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.777020 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" containerID="77bee190ca2586e1b8e70290ed192fa8857086e33925598add5a3f3780077f54" exitCode=1 Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.777137 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" event={"ID":"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19","Type":"ContainerDied","Data":"77bee190ca2586e1b8e70290ed192fa8857086e33925598add5a3f3780077f54"} Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.777219 5010 scope.go:117] "RemoveContainer" containerID="f9412429b8a67c0a190bbe69c6b86f7e42407ffc5cb422757ff73d8c857f60d2" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.778059 5010 scope.go:117] "RemoveContainer" containerID="77bee190ca2586e1b8e70290ed192fa8857086e33925598add5a3f3780077f54" Nov 26 16:00:31 crc kubenswrapper[5010]: E1126 16:00:31.778607 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-p5446_openstack-operators(7ec0a644-00e0-4b67-b2ad-7a7128dcaf19)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" podUID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.785165 5010 generic.go:334] "Generic (PLEG): container finished" podID="1b523418-d938-4ba7-8788-b93b382429e3" containerID="3fb15823f8d1c05707a993c8ddcc0f4f76ee1cdd593624f3974f8b83846f2b4f" exitCode=1 Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.785285 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerDied","Data":"3fb15823f8d1c05707a993c8ddcc0f4f76ee1cdd593624f3974f8b83846f2b4f"} Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.786108 5010 scope.go:117] "RemoveContainer" containerID="3fb15823f8d1c05707a993c8ddcc0f4f76ee1cdd593624f3974f8b83846f2b4f" Nov 26 16:00:31 crc kubenswrapper[5010]: E1126 16:00:31.786554 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-659d75f7c6-lwbrh_openstack-operators(1b523418-d938-4ba7-8788-b93b382429e3)\"" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podUID="1b523418-d938-4ba7-8788-b93b382429e3" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.790347 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" event={"ID":"82a45cae-9275-4f6a-8807-1ed1c97da89e","Type":"ContainerDied","Data":"cde693afa111afc94e27a937bb9996e93785a653010cebf50c14f08dea5a08a8"} Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.790357 5010 generic.go:334] "Generic (PLEG): container finished" podID="82a45cae-9275-4f6a-8807-1ed1c97da89e" containerID="cde693afa111afc94e27a937bb9996e93785a653010cebf50c14f08dea5a08a8" exitCode=1 Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.791090 5010 scope.go:117] "RemoveContainer" containerID="cde693afa111afc94e27a937bb9996e93785a653010cebf50c14f08dea5a08a8" Nov 26 16:00:31 crc kubenswrapper[5010]: E1126 16:00:31.791480 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with 
CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-nfl24_openstack-operators(82a45cae-9275-4f6a-8807-1ed1c97da89e)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" podUID="82a45cae-9275-4f6a-8807-1ed1c97da89e" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.862310 5010 scope.go:117] "RemoveContainer" containerID="1e7f60f161902fc7ea8951bcf6c0ff8f1c782354efd64e3efaed636c46858e43" Nov 26 16:00:31 crc kubenswrapper[5010]: I1126 16:00:31.928217 5010 scope.go:117] "RemoveContainer" containerID="154aef2360f415a2ceded687e7fa0a7301fb8088de9bde3d7b7c6cf03b55c884" Nov 26 16:00:32 crc kubenswrapper[5010]: I1126 16:00:32.807094 5010 scope.go:117] "RemoveContainer" containerID="3fb15823f8d1c05707a993c8ddcc0f4f76ee1cdd593624f3974f8b83846f2b4f" Nov 26 16:00:32 crc kubenswrapper[5010]: E1126 16:00:32.807913 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=openstack-operator-controller-manager-659d75f7c6-lwbrh_openstack-operators(1b523418-d938-4ba7-8788-b93b382429e3)\"" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podUID="1b523418-d938-4ba7-8788-b93b382429e3" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.052057 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.308529 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-544fb75865-bd9lh" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.560873 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.765868 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.843439 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.864956 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.865913 5010 scope.go:117] "RemoveContainer" containerID="a7f59f9e901bf9d7be4b80c40c265cd7c0ed24bc9a7ce075477dec43154728cc" Nov 26 16:00:34 crc kubenswrapper[5010]: E1126 16:00:34.866398 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=barbican-operator-controller-manager-7b64f4fb85-c89k7_openstack-operators(9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f)\"" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" podUID="9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.879770 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.880608 5010 scope.go:117] "RemoveContainer" 
containerID="64a55030c6c894837eb6db6ae4c70d000febb4d75236220e04fa0ffb47ec39fe" Nov 26 16:00:34 crc kubenswrapper[5010]: E1126 16:00:34.881039 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=cinder-operator-controller-manager-6b7f75547b-sbppr_openstack-operators(6a970d68-d885-4fc2-9d58-508537a42572)\"" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" podUID="6a970d68-d885-4fc2-9d58-508537a42572" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.908491 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.909482 5010 scope.go:117] "RemoveContainer" containerID="0905832a42ac6fe820fcace1b0f0fc38e2399f94f1dbfbf25c2e3609d07d5341" Nov 26 16:00:34 crc kubenswrapper[5010]: E1126 16:00:34.909934 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=designate-operator-controller-manager-955677c94-qmr28_openstack-operators(a4bbf592-007c-4176-a6a3-0209b33b6048)\"" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" podUID="a4bbf592-007c-4176-a6a3-0209b33b6048" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.960961 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 16:00:34 crc kubenswrapper[5010]: I1126 16:00:34.962237 5010 scope.go:117] "RemoveContainer" containerID="388d1fb00f0f35064b72900d498b7f764cfa82db851ce02fa5e1a37b2192fdbe" Nov 26 16:00:34 crc kubenswrapper[5010]: E1126 16:00:34.962940 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=glance-operator-controller-manager-589cbd6b5b-4w8ql_openstack-operators(b6c13a13-621b-45cb-9830-4dfaf15ee06b)\"" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" podUID="b6c13a13-621b-45cb-9830-4dfaf15ee06b" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.113023 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.113823 5010 scope.go:117] "RemoveContainer" containerID="4f6ba9ba8ac763723907ff2cf50cc02e313f97147a3a9e1fe0545a0de914c725" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.114372 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=horizon-operator-controller-manager-5d494799bf-mc96z_openstack-operators(8b2b09a7-2b17-43da-ae0e-4448b96eed50)\"" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" podUID="8b2b09a7-2b17-43da-ae0e-4448b96eed50" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.128507 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.129782 5010 scope.go:117] "RemoveContainer" 
containerID="bc67c8b3b49e1688ee59470962dc5a6ba475642655cdef41ba5ba20dc42161a0" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.130339 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=keystone-operator-controller-manager-7b4567c7cf-9lx7h_openstack-operators(ce1fedbc-31da-4c37-9731-34e79ab604f4)\"" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" podUID="ce1fedbc-31da-4c37-9731-34e79ab604f4" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.184604 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.218485 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.219762 5010 scope.go:117] "RemoveContainer" containerID="4997370ea983f7691e6654e87ab4cfe917cb8acce21e81c874d286ce490050b1" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.220351 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=mariadb-operator-controller-manager-66f4dd4bc7-sj6tg_openstack-operators(dfb4a15b-a139-4778-acc7-f236e947ca96)\"" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" podUID="dfb4a15b-a139-4778-acc7-f236e947ca96" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.249223 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.250186 5010 scope.go:117] "RemoveContainer" containerID="7a0816c89341c5499119fcedc2fd8fe7e63f5208a8ec5450c940288af755e6d8" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.250766 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=neutron-operator-controller-manager-6fdcddb789-f64fd_openstack-operators(191eef94-8fdf-4180-8ce0-1d62fc3f0de0)\"" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" podUID="191eef94-8fdf-4180-8ce0-1d62fc3f0de0" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.343689 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.387236 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.387989 5010 scope.go:117] "RemoveContainer" containerID="77bee190ca2586e1b8e70290ed192fa8857086e33925598add5a3f3780077f54" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.388233 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=heat-operator-controller-manager-5b77f656f-p5446_openstack-operators(7ec0a644-00e0-4b67-b2ad-7a7128dcaf19)\"" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" 
podUID="7ec0a644-00e0-4b67-b2ad-7a7128dcaf19" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.443647 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.489126 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.489686 5010 scope.go:117] "RemoveContainer" containerID="45ec74abecd9f369222be85712f2915f55924a6e2ba6f2388b2063ecf35d38e9" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.489975 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=manila-operator-controller-manager-5d499bf58b-k7vx2_openstack-operators(7e5769c2-7f83-41ff-9365-7f5792e8d81b)\"" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" podUID="7e5769c2-7f83-41ff-9365-7f5792e8d81b" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.637303 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.653998 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.655000 5010 scope.go:117] "RemoveContainer" containerID="2817a934c40d92ea12cae6c0c9659d75b36229b260a71786e952932afbc02626" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.655453 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=nova-operator-controller-manager-79556f57fc-5llrj_openstack-operators(05194bfa-88c3-4826-8a59-6d62252e4b1a)\"" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" podUID="05194bfa-88c3-4826-8a59-6d62252e4b1a" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.710515 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.711199 5010 scope.go:117] "RemoveContainer" containerID="b6f727875a57a9ce1407886795665b4028280d6cbab737da62795136e025f4a9" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.711430 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=ovn-operator-controller-manager-56897c768d-gcj9h_openstack-operators(b0d7107e-a617-4a7b-a6e3-0267996965ef)\"" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" podUID="b0d7107e-a617-4a7b-a6e3-0267996965ef" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.748407 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.749133 5010 scope.go:117] "RemoveContainer" containerID="dd0777e2b6d5aa10388112cc1b1d6015a1ec88713ec41070bcf2ccc575e346cd" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.749509 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=placement-operator-controller-manager-57988cc5b5-zq8vc_openstack-operators(bf155072-f786-47eb-9455-f807444d12e9)\"" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" podUID="bf155072-f786-47eb-9455-f807444d12e9" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.751225 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.761466 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.762349 5010 scope.go:117] "RemoveContainer" containerID="cde693afa111afc94e27a937bb9996e93785a653010cebf50c14f08dea5a08a8" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.762863 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=swift-operator-controller-manager-d77b94747-nfl24_openstack-operators(82a45cae-9275-4f6a-8807-1ed1c97da89e)\"" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" podUID="82a45cae-9275-4f6a-8807-1ed1c97da89e" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.774280 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.775163 5010 scope.go:117] "RemoveContainer" containerID="4983f8dd92fd096ad73e67faeb5f8a1f9463456f2fc377546693d958e28e3fa7" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.775494 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=telemetry-operator-controller-manager-76cc84c6bb-zrldc_openstack-operators(01236c17-da54-4428-9e82-9a3b0165d6fc)\"" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" podUID="01236c17-da54-4428-9e82-9a3b0165d6fc" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.797882 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5cd6c7f4c8-xmltd" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.804632 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.831632 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.832298 5010 scope.go:117] "RemoveContainer" containerID="e1337930b34a6a7d3cca8018189124704819e71a5db6061af53d19fb6c5e9a46" Nov 26 16:00:35 crc kubenswrapper[5010]: E1126 16:00:35.832585 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=watcher-operator-controller-manager-656dcb59d4-bdtsk_openstack-operators(522c2ed1-a470-4885-88fc-395ed7834b23)\"" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" 
podUID="522c2ed1-a470-4885-88fc-395ed7834b23" Nov 26 16:00:35 crc kubenswrapper[5010]: I1126 16:00:35.892387 5010 scope.go:117] "RemoveContainer" containerID="222a4e61999cc7b02b84c863e925834c7eafc4ef1cc9e1e9f11901e1abc296d6" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.039230 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.274976 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.300272 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.387675 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.446892 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.517628 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.696782 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.852529 5010 generic.go:334] "Generic (PLEG): container finished" podID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" containerID="b368b6253164ef3826c5daad96b603655a6301b5bd59b5fa9494aa7f0db8dee1" exitCode=1 Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.852595 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" event={"ID":"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6","Type":"ContainerDied","Data":"b368b6253164ef3826c5daad96b603655a6301b5bd59b5fa9494aa7f0db8dee1"} Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.852776 5010 scope.go:117] "RemoveContainer" containerID="222a4e61999cc7b02b84c863e925834c7eafc4ef1cc9e1e9f11901e1abc296d6" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.854359 5010 scope.go:117] "RemoveContainer" containerID="b368b6253164ef3826c5daad96b603655a6301b5bd59b5fa9494aa7f0db8dee1" Nov 26 16:00:36 crc kubenswrapper[5010]: E1126 16:00:36.855337 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-7757b8b846-drzn5_metallb-system(afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6)\"" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.878282 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 16:00:36 crc kubenswrapper[5010]: I1126 16:00:36.878314 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.033143 5010 reflector.go:368] Caches populated for *v1.Secret from 
object-"cert-manager"/"cert-manager-cainjector-dockercfg-kdxh4" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.343388 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.346386 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-82vqx" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.358837 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.427955 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.466188 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-2w77w" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.597747 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.839511 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.892610 5010 scope.go:117] "RemoveContainer" containerID="f76ff90a964229428c475ae5e7da912de0d886e368d0cbc3ab3611da6efa7414" Nov 26 16:00:37 crc kubenswrapper[5010]: I1126 16:00:37.907443 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-hww96" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.081439 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.148523 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.181850 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.203227 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.367253 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.471788 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.569586 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.622190 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.658443 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.695074 5010 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.833791 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.880452 5010 generic.go:334] "Generic (PLEG): container finished" podID="cdfa6310-b994-49ba-8e89-dc6584a65314" containerID="0d78139dd36dad1399e97c3fc78729d19e2abefdead398c5fe79499ff29668e0" exitCode=1 Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.880511 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" event={"ID":"cdfa6310-b994-49ba-8e89-dc6584a65314","Type":"ContainerDied","Data":"0d78139dd36dad1399e97c3fc78729d19e2abefdead398c5fe79499ff29668e0"} Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.880578 5010 scope.go:117] "RemoveContainer" containerID="f76ff90a964229428c475ae5e7da912de0d886e368d0cbc3ab3611da6efa7414" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.881249 5010 scope.go:117] "RemoveContainer" containerID="0d78139dd36dad1399e97c3fc78729d19e2abefdead398c5fe79499ff29668e0" Nov 26 16:00:38 crc kubenswrapper[5010]: E1126 16:00:38.881642 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-h9gnm_openstack-operators(cdfa6310-b994-49ba-8e89-dc6584a65314)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" podUID="cdfa6310-b994-49ba-8e89-dc6584a65314" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.891323 5010 scope.go:117] "RemoveContainer" containerID="2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.954202 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.993658 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-694mw" Nov 26 16:00:38 crc kubenswrapper[5010]: I1126 16:00:38.999389 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.031194 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-69lb8" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.120274 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.161879 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.205446 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.271212 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.329664 5010 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.367196 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.401699 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.439835 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-nbfc9" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.456674 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.563266 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.563484 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.704452 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.839511 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.858593 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.887850 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-42sx8" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.895453 5010 generic.go:334] "Generic (PLEG): container finished" podID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" containerID="6b5a5302fcc5c018de9db208f100a9100c8eb892effa385cb040a895afb1e8d4" exitCode=1 Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.920649 5010 scope.go:117] "RemoveContainer" containerID="39d3b7ef31a36555c43a80e0c9fbb3aa8535dd346b1a27390ad9900f22a5cc62" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.921190 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerDied","Data":"6b5a5302fcc5c018de9db208f100a9100c8eb892effa385cb040a895afb1e8d4"} Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.921265 5010 scope.go:117] "RemoveContainer" containerID="2e366fb2bc04b899db6c098803cd33d1bc5e3de24d15e5a18df32c9460ccc120" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.923462 5010 scope.go:117] "RemoveContainer" containerID="6b5a5302fcc5c018de9db208f100a9100c8eb892effa385cb040a895afb1e8d4" Nov 26 16:00:39 crc kubenswrapper[5010]: E1126 16:00:39.923977 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548)\"" 
pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 16:00:39 crc kubenswrapper[5010]: I1126 16:00:39.992124 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.012076 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.024094 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.178726 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.280364 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.345088 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.411750 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-lklfh" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.538090 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.565873 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.573692 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.609903 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-mglbg" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.646086 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-g5pfg" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.719680 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.720604 5010 scope.go:117] "RemoveContainer" containerID="b368b6253164ef3826c5daad96b603655a6301b5bd59b5fa9494aa7f0db8dee1" Nov 26 16:00:40 crc kubenswrapper[5010]: E1126 16:00:40.720976 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-7757b8b846-drzn5_metallb-system(afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6)\"" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.755752 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.794078 5010 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.836414 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.862997 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.863925 5010 scope.go:117] "RemoveContainer" containerID="f61948e73ca7c2ee2141ae7a3cfd919477923d37ac589e7f19414f5aa01192d4" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.886443 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.907732 5010 generic.go:334] "Generic (PLEG): container finished" podID="b4799b0e-11ed-4331-84d1-daf581d00bbe" containerID="bfa191fa5b0ad0112f1b7b19e4be3e4a52774d2d8ae773e8e639f8dd5ccd37cb" exitCode=1 Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.907807 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerDied","Data":"bfa191fa5b0ad0112f1b7b19e4be3e4a52774d2d8ae773e8e639f8dd5ccd37cb"} Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.907844 5010 scope.go:117] "RemoveContainer" containerID="39d3b7ef31a36555c43a80e0c9fbb3aa8535dd346b1a27390ad9900f22a5cc62" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.908413 5010 scope.go:117] "RemoveContainer" containerID="bfa191fa5b0ad0112f1b7b19e4be3e4a52774d2d8ae773e8e639f8dd5ccd37cb" Nov 26 16:00:40 crc kubenswrapper[5010]: E1126 16:00:40.908664 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 16:00:40 crc kubenswrapper[5010]: I1126 16:00:40.998042 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-674cb676c8nb4vx" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.228914 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.235694 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.244158 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-8bj6n" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.255700 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.291037 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.320069 5010 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.362496 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.387605 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.545283 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.590025 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.593595 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.621518 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.647107 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.673141 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.684433 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.684505 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.685347 5010 scope.go:117] "RemoveContainer" containerID="3fb15823f8d1c05707a993c8ddcc0f4f76ee1cdd593624f3974f8b83846f2b4f" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.713003 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-qn5m9" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.719475 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.772396 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.779322 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.827301 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.857538 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.886868 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 16:00:41 crc 
kubenswrapper[5010]: I1126 16:00:41.917569 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.931876 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.932438 5010 generic.go:334] "Generic (PLEG): container finished" podID="93625d2a-6f36-43a8-b26c-8f6506955b15" containerID="4c065cf603ac4997ed25f7dae51d38b780e4d6120f327d9dd51f238201127e87" exitCode=1 Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.932533 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerDied","Data":"4c065cf603ac4997ed25f7dae51d38b780e4d6120f327d9dd51f238201127e87"} Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.932567 5010 scope.go:117] "RemoveContainer" containerID="f61948e73ca7c2ee2141ae7a3cfd919477923d37ac589e7f19414f5aa01192d4" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.933245 5010 scope.go:117] "RemoveContainer" containerID="4c065cf603ac4997ed25f7dae51d38b780e4d6120f327d9dd51f238201127e87" Nov 26 16:00:41 crc kubenswrapper[5010]: E1126 16:00:41.933445 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-sxdct_openstack-operators(93625d2a-6f36-43a8-b26c-8f6506955b15)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" podUID="93625d2a-6f36-43a8-b26c-8f6506955b15" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.936884 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.946698 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerStarted","Data":"5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18"} Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.946904 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:00:41 crc kubenswrapper[5010]: I1126 16:00:41.949453 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.018495 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.033955 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.062724 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.168560 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.192833 5010 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-nmstate"/"nmstate-operator-dockercfg-9hqt5" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.220061 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.295531 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.376099 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.382392 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-8f5cm" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.400071 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.404335 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.457357 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-bk9gq" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.462971 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-vvd9k" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.475396 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.533574 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.589606 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.590856 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.663944 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.707106 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.717812 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.729587 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.746644 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.846239 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.871483 5010 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.908088 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.957636 5010 generic.go:334] "Generic (PLEG): container finished" podID="1b523418-d938-4ba7-8788-b93b382429e3" containerID="5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18" exitCode=1 Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.958162 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.958196 5010 scope.go:117] "RemoveContainer" containerID="5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18" Nov 26 16:00:42 crc kubenswrapper[5010]: E1126 16:00:42.958445 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-659d75f7c6-lwbrh_openstack-operators(1b523418-d938-4ba7-8788-b93b382429e3)\"" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podUID="1b523418-d938-4ba7-8788-b93b382429e3" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.959624 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerDied","Data":"5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18"} Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.959697 5010 scope.go:117] "RemoveContainer" containerID="3fb15823f8d1c05707a993c8ddcc0f4f76ee1cdd593624f3974f8b83846f2b4f" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.968040 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.983490 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.989586 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 16:00:42 crc kubenswrapper[5010]: I1126 16:00:42.991683 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.051939 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.056461 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.069548 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.129411 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.172393 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 26 
16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.229139 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.234310 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.261256 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.342507 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.342817 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.387692 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.396579 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-dprmt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.465243 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.492848 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.632772 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.636183 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.679000 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-j5hnm" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.687487 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.717992 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.720028 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.750268 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.757945 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.763821 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.791441 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.804465 5010 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-wl5st" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.808520 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.926984 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.937189 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.971507 5010 scope.go:117] "RemoveContainer" containerID="5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18" Nov 26 16:00:43 crc kubenswrapper[5010]: E1126 16:00:43.971742 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-659d75f7c6-lwbrh_openstack-operators(1b523418-d938-4ba7-8788-b93b382429e3)\"" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podUID="1b523418-d938-4ba7-8788-b93b382429e3" Nov 26 16:00:43 crc kubenswrapper[5010]: I1126 16:00:43.986009 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-lvcxf" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.048912 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.106100 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.193923 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.212923 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-j2dzs" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.271158 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.330126 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.419169 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.419806 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.468841 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.597838 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-gp9n4" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.611999 5010 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.612611 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.663860 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.714725 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-twrjt" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.787695 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.814615 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.837276 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.864741 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.865604 5010 scope.go:117] "RemoveContainer" containerID="a7f59f9e901bf9d7be4b80c40c265cd7c0ed24bc9a7ce075477dec43154728cc" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.874655 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.879927 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.880353 5010 scope.go:117] "RemoveContainer" containerID="64a55030c6c894837eb6db6ae4c70d000febb4d75236220e04fa0ffb47ec39fe" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.908813 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.909662 5010 scope.go:117] "RemoveContainer" containerID="0905832a42ac6fe820fcace1b0f0fc38e2399f94f1dbfbf25c2e3609d07d5341" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.917097 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.923604 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.937543 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.952100 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 16:00:44 crc 
kubenswrapper[5010]: I1126 16:00:44.962042 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 16:00:44 crc kubenswrapper[5010]: I1126 16:00:44.963614 5010 scope.go:117] "RemoveContainer" containerID="388d1fb00f0f35064b72900d498b7f764cfa82db851ce02fa5e1a37b2192fdbe" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.061988 5010 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.090819 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-9jvtv" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.112760 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.113432 5010 scope.go:117] "RemoveContainer" containerID="4f6ba9ba8ac763723907ff2cf50cc02e313f97147a3a9e1fe0545a0de914c725" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.114614 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.115072 5010 scope.go:117] "RemoveContainer" containerID="6b5a5302fcc5c018de9db208f100a9100c8eb892effa385cb040a895afb1e8d4" Nov 26 16:00:45 crc kubenswrapper[5010]: E1126 16:00:45.115329 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.115386 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.147927 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.148484 5010 scope.go:117] "RemoveContainer" containerID="bc67c8b3b49e1688ee59470962dc5a6ba475642655cdef41ba5ba20dc42161a0" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.174453 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.201503 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.219541 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.220238 5010 scope.go:117] "RemoveContainer" containerID="4997370ea983f7691e6654e87ab4cfe917cb8acce21e81c874d286ce490050b1" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.238138 5010 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.238406 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.248676 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.249499 5010 scope.go:117] "RemoveContainer" containerID="7a0816c89341c5499119fcedc2fd8fe7e63f5208a8ec5450c940288af755e6d8" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.334571 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.346730 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.387147 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.387786 5010 scope.go:117] "RemoveContainer" containerID="77bee190ca2586e1b8e70290ed192fa8857086e33925598add5a3f3780077f54" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.437312 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.461563 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.489408 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.489860 5010 scope.go:117] "RemoveContainer" containerID="45ec74abecd9f369222be85712f2915f55924a6e2ba6f2388b2063ecf35d38e9" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.507471 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.545454 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.580723 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.584137 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.653417 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.654101 5010 scope.go:117] "RemoveContainer" containerID="2817a934c40d92ea12cae6c0c9659d75b36229b260a71786e952932afbc02626" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.677172 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.677349 5010 kubelet.go:2542] 
"SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.677386 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.677881 5010 scope.go:117] "RemoveContainer" containerID="bfa191fa5b0ad0112f1b7b19e4be3e4a52774d2d8ae773e8e639f8dd5ccd37cb" Nov 26 16:00:45 crc kubenswrapper[5010]: E1126 16:00:45.678091 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.709887 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.710663 5010 scope.go:117] "RemoveContainer" containerID="b6f727875a57a9ce1407886795665b4028280d6cbab737da62795136e025f4a9" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.748133 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.749156 5010 scope.go:117] "RemoveContainer" containerID="dd0777e2b6d5aa10388112cc1b1d6015a1ec88713ec41070bcf2ccc575e346cd" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.761489 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.762159 5010 scope.go:117] "RemoveContainer" containerID="cde693afa111afc94e27a937bb9996e93785a653010cebf50c14f08dea5a08a8" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.763599 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.775076 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.775491 5010 scope.go:117] "RemoveContainer" containerID="4983f8dd92fd096ad73e67faeb5f8a1f9463456f2fc377546693d958e28e3fa7" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.832194 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.832759 5010 scope.go:117] "RemoveContainer" containerID="e1337930b34a6a7d3cca8018189124704819e71a5db6061af53d19fb6c5e9a46" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.869383 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.997862 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" event={"ID":"dfb4a15b-a139-4778-acc7-f236e947ca96","Type":"ContainerStarted","Data":"c552f75d3fbec37e813bd47ccad1f2f771d686998a015f4eb8176906012f3890"} Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.998824 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 16:00:45 crc kubenswrapper[5010]: I1126 16:00:45.999001 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.003184 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" event={"ID":"b0d7107e-a617-4a7b-a6e3-0267996965ef","Type":"ContainerStarted","Data":"8cbff6c490789667e99eb43e68e4a8f28c66eac2d05a1204b0bfb6fac3ea2a57"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.003374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.005696 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" event={"ID":"b6c13a13-621b-45cb-9830-4dfaf15ee06b","Type":"ContainerStarted","Data":"cb3590414c111b1581a356832d7924d1c2660148d19c42dd8fb1d51437ae9037"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.005961 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.008464 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" event={"ID":"82a45cae-9275-4f6a-8807-1ed1c97da89e","Type":"ContainerStarted","Data":"ecabed7f949762ee4bb6f3592e1e1cece854c96dbef7743339626ed2fa8b97ca"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.008672 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.010825 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" event={"ID":"a4bbf592-007c-4176-a6a3-0209b33b6048","Type":"ContainerStarted","Data":"c954b59de890f8d1875bf5be4234af75b81107fd1e926ac4b9103f01ac4d11a9"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.010991 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.013169 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" event={"ID":"05194bfa-88c3-4826-8a59-6d62252e4b1a","Type":"ContainerStarted","Data":"2a23b4aa1a895d005220db8e133fda51538fc96dcaacfc6e3b7bc1cfc5e66748"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.013293 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.032941 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" event={"ID":"8b2b09a7-2b17-43da-ae0e-4448b96eed50","Type":"ContainerStarted","Data":"1748725cecf85a6f2537233b26868759a65c75b9c40fe6b81e6a2f8394101fda"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.033200 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.035723 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" event={"ID":"191eef94-8fdf-4180-8ce0-1d62fc3f0de0","Type":"ContainerStarted","Data":"ca568defa4a6a13b50242c39ed27cd4f41ba621f5a91ea3923691f2385b1ad8b"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.035932 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.037593 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" event={"ID":"7ec0a644-00e0-4b67-b2ad-7a7128dcaf19","Type":"ContainerStarted","Data":"9a8f4e32fdacc02a99b670f76616f2f91f20f35ab644e0fba7156db2bd530a9e"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.037780 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.039853 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" event={"ID":"6a970d68-d885-4fc2-9d58-508537a42572","Type":"ContainerStarted","Data":"0955a7b668b24a60a6213652bdb84c8d69d26fe777d710abed6d178d55f2cb9f"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.040036 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.042243 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" event={"ID":"9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f","Type":"ContainerStarted","Data":"bf16226253d724cbaf43d00760419de0ec291be61eab923476164e77bd383a04"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.042406 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.043202 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.044323 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" event={"ID":"7e5769c2-7f83-41ff-9365-7f5792e8d81b","Type":"ContainerStarted","Data":"903f514df4e79199997dfec8cb918e28a297f2772844a61b138764f3e5412e7d"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.045058 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.047321 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" event={"ID":"ce1fedbc-31da-4c37-9731-34e79ab604f4","Type":"ContainerStarted","Data":"939f7f982aad59f7ade7bc09126776bf4349fc74d8c975ce1ffb295b52266d40"} Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.047784 5010 scope.go:117] "RemoveContainer" containerID="6b5a5302fcc5c018de9db208f100a9100c8eb892effa385cb040a895afb1e8d4" Nov 26 16:00:46 crc kubenswrapper[5010]: E1126 16:00:46.047978 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=ironic-operator-controller-manager-67cb4dc6d4-dhngn_openstack-operators(ec8d3bdf-fc89-426b-82e9-a1ae81a3e548)\"" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" podUID="ec8d3bdf-fc89-426b-82e9-a1ae81a3e548" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.108319 5010 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.142582 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.149985 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.170178 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.265002 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.265334 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.282919 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.329058 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.336046 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.428594 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.456554 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.457697 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.505764 5010 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.556117 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.568655 5010 reflector.go:368] 
Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-2mmwj" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.571007 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.714783 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.743937 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.784845 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.786005 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.790092 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-696hv" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.800154 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-wsf8v" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.861971 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.879684 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 16:00:46 crc kubenswrapper[5010]: I1126 16:00:46.906409 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-bxffz" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.000966 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.015029 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.027551 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.057404 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" event={"ID":"bf155072-f786-47eb-9455-f807444d12e9","Type":"ContainerStarted","Data":"cf3da7670b1bc775dcd51a3b84831e704a32452e2efbeb19736cae3cb0accdcf"} Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.057835 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.059427 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" event={"ID":"01236c17-da54-4428-9e82-9a3b0165d6fc","Type":"ContainerStarted","Data":"7a8c9fdbeb41219dd46503b0374ab00bc630869996fcafc7dd4ed69957c90570"} Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.059602 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.061810 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" event={"ID":"522c2ed1-a470-4885-88fc-395ed7834b23","Type":"ContainerStarted","Data":"b73494f20e881dbbac2120f0eea3367e6486139a15e243eea3d60a39867e069f"} Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.106393 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.155758 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.231489 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.242889 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.260599 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.335236 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.345936 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.385867 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.416305 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-g7kfh" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.466621 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.488817 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.562761 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.610357 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.611835 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.631680 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.727346 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 
16:00:47.767771 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.799559 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.818319 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.856928 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.894031 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.906957 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.917216 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.936246 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.965121 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 16:00:47 crc kubenswrapper[5010]: I1126 16:00:47.991639 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-8wrrj" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.087248 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.089164 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.168296 5010 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.312403 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.632169 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.639767 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.647204 5010 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.662052 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.662121 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.668743 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.681789 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=24.681771791 podStartE2EDuration="24.681771791s" podCreationTimestamp="2025-11-26 16:00:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:00:48.67810581 +0000 UTC m=+2069.468822998" watchObservedRunningTime="2025-11-26 16:00:48.681771791 +0000 UTC m=+2069.472488949" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.710174 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.795901 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.796095 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-wcfnk" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.809454 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.866802 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.884672 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-gsklg" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.897928 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.941237 5010 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-s2d9x" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.962456 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 16:00:48 crc kubenswrapper[5010]: I1126 16:00:48.980795 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.082935 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.100174 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.113180 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.149788 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.202439 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.222154 5010 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.295995 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.296304 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.312752 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.352331 5010 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.363870 5010 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.454789 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.544377 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.577046 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.591514 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.608466 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 16:00:49 crc kubenswrapper[5010]: I1126 16:00:49.770901 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.008749 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.020410 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.064254 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.075647 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.126915 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.154545 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.324132 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.334226 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-8g267" Nov 26 
16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.633631 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.681993 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.726302 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.750129 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.794493 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.815101 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-94p6q" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.862398 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.863142 5010 scope.go:117] "RemoveContainer" containerID="4c065cf603ac4997ed25f7dae51d38b780e4d6120f327d9dd51f238201127e87" Nov 26 16:00:50 crc kubenswrapper[5010]: E1126 16:00:50.863434 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-sxdct_openstack-operators(93625d2a-6f36-43a8-b26c-8f6506955b15)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" podUID="93625d2a-6f36-43a8-b26c-8f6506955b15" Nov 26 16:00:50 crc kubenswrapper[5010]: I1126 16:00:50.989835 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.144586 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.173222 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.324947 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.361001 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.803447 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.832363 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 16:00:51 crc kubenswrapper[5010]: I1126 16:00:51.891858 5010 scope.go:117] "RemoveContainer" containerID="0d78139dd36dad1399e97c3fc78729d19e2abefdead398c5fe79499ff29668e0" Nov 26 16:00:51 crc 
kubenswrapper[5010]: E1126 16:00:51.892254 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=operator pod=rabbitmq-cluster-operator-manager-668c99d594-h9gnm_openstack-operators(cdfa6310-b994-49ba-8e89-dc6584a65314)\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" podUID="cdfa6310-b994-49ba-8e89-dc6584a65314" Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.068917 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.120047 5010 generic.go:334] "Generic (PLEG): container finished" podID="ad59753d-a191-4ef5-9945-d1126e81bb8e" containerID="457a9a7838ee1d7e0719d0351a12a74ee98c606335404e5e99269c340fe4c210" exitCode=1 Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.120114 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" event={"ID":"ad59753d-a191-4ef5-9945-d1126e81bb8e","Type":"ContainerDied","Data":"457a9a7838ee1d7e0719d0351a12a74ee98c606335404e5e99269c340fe4c210"} Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.120802 5010 scope.go:117] "RemoveContainer" containerID="457a9a7838ee1d7e0719d0351a12a74ee98c606335404e5e99269c340fe4c210" Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.149807 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.340792 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.731534 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 16:00:52 crc kubenswrapper[5010]: I1126 16:00:52.988116 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" Nov 26 16:00:53 crc kubenswrapper[5010]: I1126 16:00:53.134832 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6pqsn" event={"ID":"ad59753d-a191-4ef5-9945-d1126e81bb8e","Type":"ContainerStarted","Data":"b2c019b5732448ca12e75868106d7ca9614f42c6beca535d8d1f97466ff400c6"} Nov 26 16:00:53 crc kubenswrapper[5010]: I1126 16:00:53.224504 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 16:00:53 crc kubenswrapper[5010]: I1126 16:00:53.436207 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 16:00:54 crc kubenswrapper[5010]: I1126 16:00:54.310436 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 16:00:54 crc kubenswrapper[5010]: I1126 16:00:54.869346 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7b64f4fb85-c89k7" Nov 26 16:00:54 crc kubenswrapper[5010]: I1126 16:00:54.883484 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6b7f75547b-sbppr" Nov 26 16:00:54 crc 
kubenswrapper[5010]: I1126 16:00:54.891638 5010 scope.go:117] "RemoveContainer" containerID="b368b6253164ef3826c5daad96b603655a6301b5bd59b5fa9494aa7f0db8dee1" Nov 26 16:00:54 crc kubenswrapper[5010]: E1126 16:00:54.892040 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=metallb-operator-controller-manager-7757b8b846-drzn5_metallb-system(afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6)\"" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" podUID="afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6" Nov 26 16:00:54 crc kubenswrapper[5010]: I1126 16:00:54.913129 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-955677c94-qmr28" Nov 26 16:00:54 crc kubenswrapper[5010]: I1126 16:00:54.992764 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-589cbd6b5b-4w8ql" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.114507 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d494799bf-mc96z" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.128908 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.130956 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7b4567c7cf-9lx7h" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.220859 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-66f4dd4bc7-sj6tg" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.250459 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-6fdcddb789-f64fd" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.388816 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5b77f656f-p5446" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.491905 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-5d499bf58b-k7vx2" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.655537 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-79556f57fc-5llrj" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.713338 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-56897c768d-gcj9h" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.750143 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-57988cc5b5-zq8vc" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.764201 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-d77b94747-nfl24" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.780162 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-zrldc" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.832126 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.834252 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-bdtsk" Nov 26 16:00:55 crc kubenswrapper[5010]: I1126 16:00:55.892618 5010 scope.go:117] "RemoveContainer" containerID="5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18" Nov 26 16:00:55 crc kubenswrapper[5010]: E1126 16:00:55.892889 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-659d75f7c6-lwbrh_openstack-operators(1b523418-d938-4ba7-8788-b93b382429e3)\"" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podUID="1b523418-d938-4ba7-8788-b93b382429e3" Nov 26 16:00:56 crc kubenswrapper[5010]: I1126 16:00:56.892816 5010 scope.go:117] "RemoveContainer" containerID="bfa191fa5b0ad0112f1b7b19e4be3e4a52774d2d8ae773e8e639f8dd5ccd37cb" Nov 26 16:00:56 crc kubenswrapper[5010]: E1126 16:00:56.893677 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=octavia-operator-controller-manager-64cdc6ff96-fx8tr_openstack-operators(b4799b0e-11ed-4331-84d1-daf581d00bbe)\"" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" podUID="b4799b0e-11ed-4331-84d1-daf581d00bbe" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.348695 5010 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.348976 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a" gracePeriod=5 Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.852857 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rk5tm"] Nov 26 16:00:57 crc kubenswrapper[5010]: E1126 16:00:57.853530 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.853565 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 16:00:57 crc kubenswrapper[5010]: E1126 16:00:57.853594 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" containerName="installer" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.853608 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" containerName="installer" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.853948 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8f08243-f216-4515-9f46-b08e1f443c95" 
containerName="installer" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.853993 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.855996 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.864823 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rk5tm"] Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.981250 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5gz46\" (UniqueName: \"kubernetes.io/projected/2bee45ea-bec2-4a22-ae68-60943f7ed58c-kube-api-access-5gz46\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.981339 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-catalog-content\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:57 crc kubenswrapper[5010]: I1126 16:00:57.981387 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-utilities\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.099821 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-catalog-content\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.100000 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-utilities\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.100180 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5gz46\" (UniqueName: \"kubernetes.io/projected/2bee45ea-bec2-4a22-ae68-60943f7ed58c-kube-api-access-5gz46\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.101996 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-utilities\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.105583 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-catalog-content\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.145516 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5gz46\" (UniqueName: \"kubernetes.io/projected/2bee45ea-bec2-4a22-ae68-60943f7ed58c-kube-api-access-5gz46\") pod \"redhat-marketplace-rk5tm\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.195823 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:00:58 crc kubenswrapper[5010]: I1126 16:00:58.610044 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rk5tm"] Nov 26 16:00:59 crc kubenswrapper[5010]: I1126 16:00:59.206341 5010 generic.go:334] "Generic (PLEG): container finished" podID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerID="2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064" exitCode=0 Nov 26 16:00:59 crc kubenswrapper[5010]: I1126 16:00:59.206468 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rk5tm" event={"ID":"2bee45ea-bec2-4a22-ae68-60943f7ed58c","Type":"ContainerDied","Data":"2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064"} Nov 26 16:00:59 crc kubenswrapper[5010]: I1126 16:00:59.206748 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rk5tm" event={"ID":"2bee45ea-bec2-4a22-ae68-60943f7ed58c","Type":"ContainerStarted","Data":"596c1f58161f941b68823f3535c49b7fe909f971ff589ce75edb6d56d1b4bd45"} Nov 26 16:00:59 crc kubenswrapper[5010]: I1126 16:00:59.916699 5010 scope.go:117] "RemoveContainer" containerID="6b5a5302fcc5c018de9db208f100a9100c8eb892effa385cb040a895afb1e8d4" Nov 26 16:01:00 crc kubenswrapper[5010]: I1126 16:01:00.226299 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" event={"ID":"ec8d3bdf-fc89-426b-82e9-a1ae81a3e548","Type":"ContainerStarted","Data":"f291da46fbeb2810bb7a9fd0d92012755d6013e9f5ee71a3a69514fe2a7ff621"} Nov 26 16:01:00 crc kubenswrapper[5010]: I1126 16:01:00.226787 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 16:01:00 crc kubenswrapper[5010]: I1126 16:01:00.228286 5010 generic.go:334] "Generic (PLEG): container finished" podID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerID="4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53" exitCode=0 Nov 26 16:01:00 crc kubenswrapper[5010]: I1126 16:01:00.228314 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rk5tm" event={"ID":"2bee45ea-bec2-4a22-ae68-60943f7ed58c","Type":"ContainerDied","Data":"4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53"} Nov 26 16:01:00 crc kubenswrapper[5010]: E1126 16:01:00.294375 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2bee45ea_bec2_4a22_ae68_60943f7ed58c.slice/crio-conmon-4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2bee45ea_bec2_4a22_ae68_60943f7ed58c.slice/crio-4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:01:00 crc kubenswrapper[5010]: I1126 16:01:00.862664 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 16:01:00 crc kubenswrapper[5010]: I1126 16:01:00.863565 5010 scope.go:117] "RemoveContainer" containerID="4c065cf603ac4997ed25f7dae51d38b780e4d6120f327d9dd51f238201127e87" Nov 26 16:01:00 crc kubenswrapper[5010]: E1126 16:01:00.863843 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=infra-operator-controller-manager-57548d458d-sxdct_openstack-operators(93625d2a-6f36-43a8-b26c-8f6506955b15)\"" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" podUID="93625d2a-6f36-43a8-b26c-8f6506955b15" Nov 26 16:01:01 crc kubenswrapper[5010]: I1126 16:01:01.684481 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:01:01 crc kubenswrapper[5010]: I1126 16:01:01.685346 5010 scope.go:117] "RemoveContainer" containerID="5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18" Nov 26 16:01:01 crc kubenswrapper[5010]: E1126 16:01:01.685739 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=openstack-operator-controller-manager-659d75f7c6-lwbrh_openstack-operators(1b523418-d938-4ba7-8788-b93b382429e3)\"" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" podUID="1b523418-d938-4ba7-8788-b93b382429e3" Nov 26 16:01:02 crc kubenswrapper[5010]: I1126 16:01:02.952584 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 16:01:02 crc kubenswrapper[5010]: I1126 16:01:02.952884 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081273 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081428 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081480 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081557 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081594 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081662 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081855 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.081996 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.082618 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.082675 5010 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.082692 5010 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.082745 5010 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.089240 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.185440 5010 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.185470 5010 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.201103 5010 scope.go:117] "RemoveContainer" containerID="05ed2c83ae3c218944dc1a4d86cde50a307f28e77065ce746efe55da7554c619" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.259666 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.259766 5010 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a" exitCode=137 Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.259863 5010 scope.go:117] "RemoveContainer" containerID="0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.259862 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.290768 5010 scope.go:117] "RemoveContainer" containerID="0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a" Nov 26 16:01:03 crc kubenswrapper[5010]: E1126 16:01:03.291358 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a\": container with ID starting with 0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a not found: ID does not exist" containerID="0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.291467 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a"} err="failed to get container status \"0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a\": rpc error: code = NotFound desc = could not find container \"0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a\": container with ID starting with 0d3cc861047c1ee108950bf67a36f7af9404d3318420fa312f86a9ba8d925e6a not found: ID does not exist" Nov 26 16:01:03 crc kubenswrapper[5010]: I1126 16:01:03.905987 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 26 16:01:04 crc kubenswrapper[5010]: I1126 16:01:04.270490 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rk5tm" event={"ID":"2bee45ea-bec2-4a22-ae68-60943f7ed58c","Type":"ContainerStarted","Data":"d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3"} Nov 26 16:01:04 crc kubenswrapper[5010]: I1126 16:01:04.288491 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rk5tm" podStartSLOduration=3.1635418 podStartE2EDuration="7.288471839s" podCreationTimestamp="2025-11-26 16:00:57 +0000 UTC" firstStartedPulling="2025-11-26 16:00:59.210471901 +0000 UTC m=+2080.001189059" lastFinishedPulling="2025-11-26 16:01:03.33540195 +0000 UTC m=+2084.126119098" observedRunningTime="2025-11-26 16:01:04.287121595 +0000 UTC m=+2085.077838753" watchObservedRunningTime="2025-11-26 16:01:04.288471839 +0000 UTC m=+2085.079188987" Nov 26 16:01:05 crc kubenswrapper[5010]: I1126 16:01:05.117192 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-67cb4dc6d4-dhngn" Nov 26 16:01:05 crc kubenswrapper[5010]: I1126 16:01:05.893170 5010 scope.go:117] "RemoveContainer" containerID="b368b6253164ef3826c5daad96b603655a6301b5bd59b5fa9494aa7f0db8dee1" Nov 26 16:01:06 crc kubenswrapper[5010]: I1126 16:01:06.304091 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" event={"ID":"afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6","Type":"ContainerStarted","Data":"c27316154e33a1d3003d875d672244ff1c7721aed6df608724f5af12d4329da5"} Nov 26 16:01:06 crc kubenswrapper[5010]: I1126 16:01:06.306632 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 16:01:06 crc kubenswrapper[5010]: I1126 16:01:06.892086 5010 scope.go:117] 
"RemoveContainer" containerID="0d78139dd36dad1399e97c3fc78729d19e2abefdead398c5fe79499ff29668e0" Nov 26 16:01:07 crc kubenswrapper[5010]: I1126 16:01:07.318657 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-h9gnm" event={"ID":"cdfa6310-b994-49ba-8e89-dc6584a65314","Type":"ContainerStarted","Data":"e5f5fba9753db92ca1a25b8ee3f567535fa35f4498865a884cde95b7d1dcfd6e"} Nov 26 16:01:08 crc kubenswrapper[5010]: I1126 16:01:08.196753 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:01:08 crc kubenswrapper[5010]: I1126 16:01:08.196806 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:01:08 crc kubenswrapper[5010]: I1126 16:01:08.251574 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:01:08 crc kubenswrapper[5010]: I1126 16:01:08.384979 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:01:11 crc kubenswrapper[5010]: I1126 16:01:11.891984 5010 scope.go:117] "RemoveContainer" containerID="bfa191fa5b0ad0112f1b7b19e4be3e4a52774d2d8ae773e8e639f8dd5ccd37cb" Nov 26 16:01:12 crc kubenswrapper[5010]: I1126 16:01:12.368792 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" event={"ID":"b4799b0e-11ed-4331-84d1-daf581d00bbe","Type":"ContainerStarted","Data":"fae526adfc6a3c9aa648c2aac76b14e5b8280fb80ecca40e82dd4275bf2c287a"} Nov 26 16:01:12 crc kubenswrapper[5010]: I1126 16:01:12.369644 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 16:01:14 crc kubenswrapper[5010]: I1126 16:01:14.891517 5010 scope.go:117] "RemoveContainer" containerID="4c065cf603ac4997ed25f7dae51d38b780e4d6120f327d9dd51f238201127e87" Nov 26 16:01:14 crc kubenswrapper[5010]: I1126 16:01:14.892008 5010 scope.go:117] "RemoveContainer" containerID="5ae2237cc03eba087cf28d08ec3051c10e6e75ccfe360c718aeb5716fc264a18" Nov 26 16:01:15 crc kubenswrapper[5010]: I1126 16:01:15.397500 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" event={"ID":"93625d2a-6f36-43a8-b26c-8f6506955b15","Type":"ContainerStarted","Data":"21c0cde5c773dab0efa0e78efd2c72b0f90971e8e28badd8a26c6155aa46679a"} Nov 26 16:01:15 crc kubenswrapper[5010]: I1126 16:01:15.398083 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 16:01:15 crc kubenswrapper[5010]: I1126 16:01:15.399516 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" event={"ID":"1b523418-d938-4ba7-8788-b93b382429e3","Type":"ContainerStarted","Data":"590723615e04099643ec00553928e85b0e16e9378158d866db3717c17a8d8c07"} Nov 26 16:01:15 crc kubenswrapper[5010]: I1126 16:01:15.399721 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.485266 5010 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/certified-operators-gxvl7"] Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.487475 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.499298 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gxvl7"] Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.688763 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-blmh4\" (UniqueName: \"kubernetes.io/projected/0482821b-77fb-44bf-8ea8-c4a3de5ff420-kube-api-access-blmh4\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.688823 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-utilities\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.688957 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-catalog-content\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.789668 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-blmh4\" (UniqueName: \"kubernetes.io/projected/0482821b-77fb-44bf-8ea8-c4a3de5ff420-kube-api-access-blmh4\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.789760 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-utilities\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.789823 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-catalog-content\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.790208 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-utilities\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.790286 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-catalog-content\") pod \"certified-operators-gxvl7\" (UID: 
\"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:16 crc kubenswrapper[5010]: I1126 16:01:16.818816 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-blmh4\" (UniqueName: \"kubernetes.io/projected/0482821b-77fb-44bf-8ea8-c4a3de5ff420-kube-api-access-blmh4\") pod \"certified-operators-gxvl7\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:17 crc kubenswrapper[5010]: I1126 16:01:17.104350 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:17 crc kubenswrapper[5010]: I1126 16:01:17.603990 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gxvl7"] Nov 26 16:01:17 crc kubenswrapper[5010]: W1126 16:01:17.610571 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0482821b_77fb_44bf_8ea8_c4a3de5ff420.slice/crio-01dadb9ed647668fe5d8dba2a2329a316ed5a1979449b5db73312b7ad70cb4ce WatchSource:0}: Error finding container 01dadb9ed647668fe5d8dba2a2329a316ed5a1979449b5db73312b7ad70cb4ce: Status 404 returned error can't find the container with id 01dadb9ed647668fe5d8dba2a2329a316ed5a1979449b5db73312b7ad70cb4ce Nov 26 16:01:18 crc kubenswrapper[5010]: I1126 16:01:18.425364 5010 generic.go:334] "Generic (PLEG): container finished" podID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerID="a364bcc7b5330f54c23e530a333fd3c699c01247cbf814c9852960ed055238ad" exitCode=0 Nov 26 16:01:18 crc kubenswrapper[5010]: I1126 16:01:18.425450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxvl7" event={"ID":"0482821b-77fb-44bf-8ea8-c4a3de5ff420","Type":"ContainerDied","Data":"a364bcc7b5330f54c23e530a333fd3c699c01247cbf814c9852960ed055238ad"} Nov 26 16:01:18 crc kubenswrapper[5010]: I1126 16:01:18.425656 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxvl7" event={"ID":"0482821b-77fb-44bf-8ea8-c4a3de5ff420","Type":"ContainerStarted","Data":"01dadb9ed647668fe5d8dba2a2329a316ed5a1979449b5db73312b7ad70cb4ce"} Nov 26 16:01:18 crc kubenswrapper[5010]: I1126 16:01:18.854365 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rk5tm"] Nov 26 16:01:18 crc kubenswrapper[5010]: I1126 16:01:18.854605 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rk5tm" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="registry-server" containerID="cri-o://d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3" gracePeriod=2 Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.252104 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.426976 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5gz46\" (UniqueName: \"kubernetes.io/projected/2bee45ea-bec2-4a22-ae68-60943f7ed58c-kube-api-access-5gz46\") pod \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.427108 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-catalog-content\") pod \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.427215 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-utilities\") pod \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\" (UID: \"2bee45ea-bec2-4a22-ae68-60943f7ed58c\") " Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.427944 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-utilities" (OuterVolumeSpecName: "utilities") pod "2bee45ea-bec2-4a22-ae68-60943f7ed58c" (UID: "2bee45ea-bec2-4a22-ae68-60943f7ed58c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.439143 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bee45ea-bec2-4a22-ae68-60943f7ed58c-kube-api-access-5gz46" (OuterVolumeSpecName: "kube-api-access-5gz46") pod "2bee45ea-bec2-4a22-ae68-60943f7ed58c" (UID: "2bee45ea-bec2-4a22-ae68-60943f7ed58c"). InnerVolumeSpecName "kube-api-access-5gz46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.441055 5010 generic.go:334] "Generic (PLEG): container finished" podID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerID="d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3" exitCode=0 Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.441096 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rk5tm" event={"ID":"2bee45ea-bec2-4a22-ae68-60943f7ed58c","Type":"ContainerDied","Data":"d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3"} Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.441132 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rk5tm" event={"ID":"2bee45ea-bec2-4a22-ae68-60943f7ed58c","Type":"ContainerDied","Data":"596c1f58161f941b68823f3535c49b7fe909f971ff589ce75edb6d56d1b4bd45"} Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.441153 5010 scope.go:117] "RemoveContainer" containerID="d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.441214 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rk5tm" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.449228 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2bee45ea-bec2-4a22-ae68-60943f7ed58c" (UID: "2bee45ea-bec2-4a22-ae68-60943f7ed58c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.461932 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cxn48"] Nov 26 16:01:19 crc kubenswrapper[5010]: E1126 16:01:19.462198 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="extract-content" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.462208 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="extract-content" Nov 26 16:01:19 crc kubenswrapper[5010]: E1126 16:01:19.462222 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="extract-utilities" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.462229 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="extract-utilities" Nov 26 16:01:19 crc kubenswrapper[5010]: E1126 16:01:19.462253 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="registry-server" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.462260 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="registry-server" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.462403 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" containerName="registry-server" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.463303 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.471726 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cxn48"] Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.472269 5010 scope.go:117] "RemoveContainer" containerID="4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.506888 5010 scope.go:117] "RemoveContainer" containerID="2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.528720 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.528750 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5gz46\" (UniqueName: \"kubernetes.io/projected/2bee45ea-bec2-4a22-ae68-60943f7ed58c-kube-api-access-5gz46\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.528759 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bee45ea-bec2-4a22-ae68-60943f7ed58c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.534780 5010 scope.go:117] "RemoveContainer" containerID="d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3" Nov 26 16:01:19 crc kubenswrapper[5010]: E1126 16:01:19.535217 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3\": container with ID starting with d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3 not found: ID does not exist" containerID="d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.535257 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3"} err="failed to get container status \"d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3\": rpc error: code = NotFound desc = could not find container \"d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3\": container with ID starting with d6828694c80bc35bbec4385ea5304ac97c3331e71e426b8a1ad116bb5382d3d3 not found: ID does not exist" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.535284 5010 scope.go:117] "RemoveContainer" containerID="4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53" Nov 26 16:01:19 crc kubenswrapper[5010]: E1126 16:01:19.535770 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53\": container with ID starting with 4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53 not found: ID does not exist" containerID="4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.535841 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53"} err="failed to get container 
status \"4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53\": rpc error: code = NotFound desc = could not find container \"4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53\": container with ID starting with 4980adcf3144c73a77acf2008871db146df33eae401f4bf56506b137b3aedb53 not found: ID does not exist" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.535875 5010 scope.go:117] "RemoveContainer" containerID="2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064" Nov 26 16:01:19 crc kubenswrapper[5010]: E1126 16:01:19.536349 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064\": container with ID starting with 2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064 not found: ID does not exist" containerID="2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.536383 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064"} err="failed to get container status \"2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064\": rpc error: code = NotFound desc = could not find container \"2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064\": container with ID starting with 2c2eed17985fec5f489e8923fe6da69a0330661284c356c7f4abecd318ad4064 not found: ID does not exist" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.629660 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmdws\" (UniqueName: \"kubernetes.io/projected/a36f5bdc-9a19-427a-be02-f04547306508-kube-api-access-zmdws\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.629738 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-catalog-content\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.630387 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-utilities\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.731907 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-catalog-content\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.731989 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-utilities\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " 
pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.732057 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmdws\" (UniqueName: \"kubernetes.io/projected/a36f5bdc-9a19-427a-be02-f04547306508-kube-api-access-zmdws\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.732568 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-catalog-content\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.732639 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-utilities\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.751202 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmdws\" (UniqueName: \"kubernetes.io/projected/a36f5bdc-9a19-427a-be02-f04547306508-kube-api-access-zmdws\") pod \"redhat-operators-cxn48\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.769768 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rk5tm"] Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.776231 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rk5tm"] Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.792007 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:19 crc kubenswrapper[5010]: I1126 16:01:19.909380 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bee45ea-bec2-4a22-ae68-60943f7ed58c" path="/var/lib/kubelet/pods/2bee45ea-bec2-4a22-ae68-60943f7ed58c/volumes" Nov 26 16:01:20 crc kubenswrapper[5010]: I1126 16:01:20.266394 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cxn48"] Nov 26 16:01:20 crc kubenswrapper[5010]: I1126 16:01:20.449290 5010 generic.go:334] "Generic (PLEG): container finished" podID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerID="370e92927b68687721c05e9b45bbed510c64fb38398b92abbfea95e1a79aeaf8" exitCode=0 Nov 26 16:01:20 crc kubenswrapper[5010]: I1126 16:01:20.449362 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxvl7" event={"ID":"0482821b-77fb-44bf-8ea8-c4a3de5ff420","Type":"ContainerDied","Data":"370e92927b68687721c05e9b45bbed510c64fb38398b92abbfea95e1a79aeaf8"} Nov 26 16:01:20 crc kubenswrapper[5010]: I1126 16:01:20.452164 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerStarted","Data":"09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e"} Nov 26 16:01:20 crc kubenswrapper[5010]: I1126 16:01:20.452211 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerStarted","Data":"16f0dfd1c80d66ec10317c7f21f041e93cf27b557791b16bb98e4830db546286"} Nov 26 16:01:20 crc kubenswrapper[5010]: I1126 16:01:20.867908 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-sxdct" Nov 26 16:01:21 crc kubenswrapper[5010]: I1126 16:01:21.460339 5010 generic.go:334] "Generic (PLEG): container finished" podID="a36f5bdc-9a19-427a-be02-f04547306508" containerID="09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e" exitCode=0 Nov 26 16:01:21 crc kubenswrapper[5010]: I1126 16:01:21.460441 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerDied","Data":"09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e"} Nov 26 16:01:21 crc kubenswrapper[5010]: I1126 16:01:21.463419 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxvl7" event={"ID":"0482821b-77fb-44bf-8ea8-c4a3de5ff420","Type":"ContainerStarted","Data":"1995a582772b406a1f7ab0d58fc8076276776343742d38aa34f06e165cd6b9ca"} Nov 26 16:01:21 crc kubenswrapper[5010]: I1126 16:01:21.689904 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-659d75f7c6-lwbrh" Nov 26 16:01:21 crc kubenswrapper[5010]: I1126 16:01:21.711914 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gxvl7" podStartSLOduration=3.052754781 podStartE2EDuration="5.711894107s" podCreationTimestamp="2025-11-26 16:01:16 +0000 UTC" firstStartedPulling="2025-11-26 16:01:18.427036453 +0000 UTC m=+2099.217753611" lastFinishedPulling="2025-11-26 16:01:21.086175789 +0000 UTC m=+2101.876892937" observedRunningTime="2025-11-26 
16:01:21.526008741 +0000 UTC m=+2102.316725889" watchObservedRunningTime="2025-11-26 16:01:21.711894107 +0000 UTC m=+2102.502611255" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.060672 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fh9mf"] Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.062037 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.067768 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nphh\" (UniqueName: \"kubernetes.io/projected/ddaaa07f-c86c-438b-80c4-3da416ecba69-kube-api-access-4nphh\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.067862 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-utilities\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.067926 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-catalog-content\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.073316 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fh9mf"] Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.169342 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nphh\" (UniqueName: \"kubernetes.io/projected/ddaaa07f-c86c-438b-80c4-3da416ecba69-kube-api-access-4nphh\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.169562 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-utilities\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.169684 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-catalog-content\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.170141 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-utilities\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.170355 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-catalog-content\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.202501 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nphh\" (UniqueName: \"kubernetes.io/projected/ddaaa07f-c86c-438b-80c4-3da416ecba69-kube-api-access-4nphh\") pod \"redhat-marketplace-fh9mf\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.382640 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:22 crc kubenswrapper[5010]: I1126 16:01:22.627359 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fh9mf"] Nov 26 16:01:22 crc kubenswrapper[5010]: W1126 16:01:22.631728 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podddaaa07f_c86c_438b_80c4_3da416ecba69.slice/crio-a3de926432da5344a3be70be19e013e019ed74c02330de5c35b5c6ca91503924 WatchSource:0}: Error finding container a3de926432da5344a3be70be19e013e019ed74c02330de5c35b5c6ca91503924: Status 404 returned error can't find the container with id a3de926432da5344a3be70be19e013e019ed74c02330de5c35b5c6ca91503924 Nov 26 16:01:23 crc kubenswrapper[5010]: I1126 16:01:23.480966 5010 generic.go:334] "Generic (PLEG): container finished" podID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerID="c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb" exitCode=0 Nov 26 16:01:23 crc kubenswrapper[5010]: I1126 16:01:23.481029 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fh9mf" event={"ID":"ddaaa07f-c86c-438b-80c4-3da416ecba69","Type":"ContainerDied","Data":"c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb"} Nov 26 16:01:23 crc kubenswrapper[5010]: I1126 16:01:23.481216 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fh9mf" event={"ID":"ddaaa07f-c86c-438b-80c4-3da416ecba69","Type":"ContainerStarted","Data":"a3de926432da5344a3be70be19e013e019ed74c02330de5c35b5c6ca91503924"} Nov 26 16:01:25 crc kubenswrapper[5010]: I1126 16:01:25.497598 5010 generic.go:334] "Generic (PLEG): container finished" podID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerID="cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749" exitCode=0 Nov 26 16:01:25 crc kubenswrapper[5010]: I1126 16:01:25.497679 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fh9mf" event={"ID":"ddaaa07f-c86c-438b-80c4-3da416ecba69","Type":"ContainerDied","Data":"cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749"} Nov 26 16:01:25 crc kubenswrapper[5010]: I1126 16:01:25.680240 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-fx8tr" Nov 26 16:01:26 crc kubenswrapper[5010]: I1126 16:01:26.510296 5010 generic.go:334] "Generic (PLEG): container finished" podID="a36f5bdc-9a19-427a-be02-f04547306508" 
containerID="735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5" exitCode=0 Nov 26 16:01:26 crc kubenswrapper[5010]: I1126 16:01:26.510390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerDied","Data":"735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5"} Nov 26 16:01:26 crc kubenswrapper[5010]: I1126 16:01:26.513180 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fh9mf" event={"ID":"ddaaa07f-c86c-438b-80c4-3da416ecba69","Type":"ContainerStarted","Data":"5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da"} Nov 26 16:01:26 crc kubenswrapper[5010]: I1126 16:01:26.555181 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fh9mf" podStartSLOduration=1.985564959 podStartE2EDuration="4.555166889s" podCreationTimestamp="2025-11-26 16:01:22 +0000 UTC" firstStartedPulling="2025-11-26 16:01:23.482944638 +0000 UTC m=+2104.273661786" lastFinishedPulling="2025-11-26 16:01:26.052546568 +0000 UTC m=+2106.843263716" observedRunningTime="2025-11-26 16:01:26.552100802 +0000 UTC m=+2107.342817950" watchObservedRunningTime="2025-11-26 16:01:26.555166889 +0000 UTC m=+2107.345884037" Nov 26 16:01:27 crc kubenswrapper[5010]: I1126 16:01:27.104912 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:27 crc kubenswrapper[5010]: I1126 16:01:27.105494 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:27 crc kubenswrapper[5010]: I1126 16:01:27.142086 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:27 crc kubenswrapper[5010]: I1126 16:01:27.522329 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerStarted","Data":"8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07"} Nov 26 16:01:27 crc kubenswrapper[5010]: I1126 16:01:27.545144 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cxn48" podStartSLOduration=3.103744989 podStartE2EDuration="8.545123859s" podCreationTimestamp="2025-11-26 16:01:19 +0000 UTC" firstStartedPulling="2025-11-26 16:01:21.461440658 +0000 UTC m=+2102.252157806" lastFinishedPulling="2025-11-26 16:01:26.902819528 +0000 UTC m=+2107.693536676" observedRunningTime="2025-11-26 16:01:27.543777635 +0000 UTC m=+2108.334494813" watchObservedRunningTime="2025-11-26 16:01:27.545123859 +0000 UTC m=+2108.335841007" Nov 26 16:01:27 crc kubenswrapper[5010]: I1126 16:01:27.570008 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:29 crc kubenswrapper[5010]: I1126 16:01:29.792619 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:29 crc kubenswrapper[5010]: I1126 16:01:29.793897 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:30 crc kubenswrapper[5010]: I1126 16:01:30.861337 5010 prober.go:107] "Probe failed" 
probeType="Startup" pod="openshift-marketplace/redhat-operators-cxn48" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="registry-server" probeResult="failure" output=< Nov 26 16:01:30 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 16:01:30 crc kubenswrapper[5010]: > Nov 26 16:01:31 crc kubenswrapper[5010]: I1126 16:01:31.460196 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gxvl7"] Nov 26 16:01:31 crc kubenswrapper[5010]: I1126 16:01:31.460566 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gxvl7" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="registry-server" containerID="cri-o://1995a582772b406a1f7ab0d58fc8076276776343742d38aa34f06e165cd6b9ca" gracePeriod=2 Nov 26 16:01:32 crc kubenswrapper[5010]: I1126 16:01:32.383389 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:32 crc kubenswrapper[5010]: I1126 16:01:32.384209 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:32 crc kubenswrapper[5010]: I1126 16:01:32.438470 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:32 crc kubenswrapper[5010]: I1126 16:01:32.596185 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:33 crc kubenswrapper[5010]: I1126 16:01:33.563059 5010 generic.go:334] "Generic (PLEG): container finished" podID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerID="1995a582772b406a1f7ab0d58fc8076276776343742d38aa34f06e165cd6b9ca" exitCode=0 Nov 26 16:01:33 crc kubenswrapper[5010]: I1126 16:01:33.563963 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxvl7" event={"ID":"0482821b-77fb-44bf-8ea8-c4a3de5ff420","Type":"ContainerDied","Data":"1995a582772b406a1f7ab0d58fc8076276776343742d38aa34f06e165cd6b9ca"} Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.068114 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.134636 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-blmh4\" (UniqueName: \"kubernetes.io/projected/0482821b-77fb-44bf-8ea8-c4a3de5ff420-kube-api-access-blmh4\") pod \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.134771 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-utilities\") pod \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.134868 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-catalog-content\") pod \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\" (UID: \"0482821b-77fb-44bf-8ea8-c4a3de5ff420\") " Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.137263 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-utilities" (OuterVolumeSpecName: "utilities") pod "0482821b-77fb-44bf-8ea8-c4a3de5ff420" (UID: "0482821b-77fb-44bf-8ea8-c4a3de5ff420"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.143341 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0482821b-77fb-44bf-8ea8-c4a3de5ff420-kube-api-access-blmh4" (OuterVolumeSpecName: "kube-api-access-blmh4") pod "0482821b-77fb-44bf-8ea8-c4a3de5ff420" (UID: "0482821b-77fb-44bf-8ea8-c4a3de5ff420"). InnerVolumeSpecName "kube-api-access-blmh4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.192767 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0482821b-77fb-44bf-8ea8-c4a3de5ff420" (UID: "0482821b-77fb-44bf-8ea8-c4a3de5ff420"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.236850 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.236920 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-blmh4\" (UniqueName: \"kubernetes.io/projected/0482821b-77fb-44bf-8ea8-c4a3de5ff420-kube-api-access-blmh4\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.236958 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0482821b-77fb-44bf-8ea8-c4a3de5ff420-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.588382 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gxvl7" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.592125 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gxvl7" event={"ID":"0482821b-77fb-44bf-8ea8-c4a3de5ff420","Type":"ContainerDied","Data":"01dadb9ed647668fe5d8dba2a2329a316ed5a1979449b5db73312b7ad70cb4ce"} Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.592331 5010 scope.go:117] "RemoveContainer" containerID="1995a582772b406a1f7ab0d58fc8076276776343742d38aa34f06e165cd6b9ca" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.636105 5010 scope.go:117] "RemoveContainer" containerID="370e92927b68687721c05e9b45bbed510c64fb38398b92abbfea95e1a79aeaf8" Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.639059 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gxvl7"] Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.648171 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gxvl7"] Nov 26 16:01:34 crc kubenswrapper[5010]: I1126 16:01:34.661658 5010 scope.go:117] "RemoveContainer" containerID="a364bcc7b5330f54c23e530a333fd3c699c01247cbf814c9852960ed055238ad" Nov 26 16:01:35 crc kubenswrapper[5010]: I1126 16:01:35.903231 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" path="/var/lib/kubelet/pods/0482821b-77fb-44bf-8ea8-c4a3de5ff420/volumes" Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.454599 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fh9mf"] Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.455159 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fh9mf" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="registry-server" containerID="cri-o://5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da" gracePeriod=2 Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.854188 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.973870 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4nphh\" (UniqueName: \"kubernetes.io/projected/ddaaa07f-c86c-438b-80c4-3da416ecba69-kube-api-access-4nphh\") pod \"ddaaa07f-c86c-438b-80c4-3da416ecba69\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.973997 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-catalog-content\") pod \"ddaaa07f-c86c-438b-80c4-3da416ecba69\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.974041 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-utilities\") pod \"ddaaa07f-c86c-438b-80c4-3da416ecba69\" (UID: \"ddaaa07f-c86c-438b-80c4-3da416ecba69\") " Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.975072 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-utilities" (OuterVolumeSpecName: "utilities") pod "ddaaa07f-c86c-438b-80c4-3da416ecba69" (UID: "ddaaa07f-c86c-438b-80c4-3da416ecba69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.980250 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddaaa07f-c86c-438b-80c4-3da416ecba69-kube-api-access-4nphh" (OuterVolumeSpecName: "kube-api-access-4nphh") pod "ddaaa07f-c86c-438b-80c4-3da416ecba69" (UID: "ddaaa07f-c86c-438b-80c4-3da416ecba69"). InnerVolumeSpecName "kube-api-access-4nphh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:01:36 crc kubenswrapper[5010]: I1126 16:01:36.996683 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ddaaa07f-c86c-438b-80c4-3da416ecba69" (UID: "ddaaa07f-c86c-438b-80c4-3da416ecba69"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.076195 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.076227 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ddaaa07f-c86c-438b-80c4-3da416ecba69-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.076236 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4nphh\" (UniqueName: \"kubernetes.io/projected/ddaaa07f-c86c-438b-80c4-3da416ecba69-kube-api-access-4nphh\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.618187 5010 generic.go:334] "Generic (PLEG): container finished" podID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerID="5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da" exitCode=0 Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.618247 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fh9mf" event={"ID":"ddaaa07f-c86c-438b-80c4-3da416ecba69","Type":"ContainerDied","Data":"5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da"} Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.618351 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fh9mf" event={"ID":"ddaaa07f-c86c-438b-80c4-3da416ecba69","Type":"ContainerDied","Data":"a3de926432da5344a3be70be19e013e019ed74c02330de5c35b5c6ca91503924"} Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.618354 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fh9mf" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.618384 5010 scope.go:117] "RemoveContainer" containerID="5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.647375 5010 scope.go:117] "RemoveContainer" containerID="cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.662351 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fh9mf"] Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.667716 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fh9mf"] Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.687972 5010 scope.go:117] "RemoveContainer" containerID="c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.714290 5010 scope.go:117] "RemoveContainer" containerID="5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da" Nov 26 16:01:37 crc kubenswrapper[5010]: E1126 16:01:37.714669 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da\": container with ID starting with 5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da not found: ID does not exist" containerID="5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.714700 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da"} err="failed to get container status \"5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da\": rpc error: code = NotFound desc = could not find container \"5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da\": container with ID starting with 5fa54903a9009c2bb963f939bb0ac081313c8f4ff9291fed1ba7b2720c33e3da not found: ID does not exist" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.714779 5010 scope.go:117] "RemoveContainer" containerID="cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749" Nov 26 16:01:37 crc kubenswrapper[5010]: E1126 16:01:37.715068 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749\": container with ID starting with cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749 not found: ID does not exist" containerID="cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.715089 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749"} err="failed to get container status \"cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749\": rpc error: code = NotFound desc = could not find container \"cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749\": container with ID starting with cd4adb1beb89ca6cfc4b1c7ffd923f2881427ea605d3960fa7db6623ef632749 not found: ID does not exist" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.715101 5010 scope.go:117] "RemoveContainer" 
containerID="c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb" Nov 26 16:01:37 crc kubenswrapper[5010]: E1126 16:01:37.715301 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb\": container with ID starting with c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb not found: ID does not exist" containerID="c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.715322 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb"} err="failed to get container status \"c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb\": rpc error: code = NotFound desc = could not find container \"c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb\": container with ID starting with c55ba0cccbc1dfd828443e8a24896fda9eb0557b61624c3adee9c78f5a5c70cb not found: ID does not exist" Nov 26 16:01:37 crc kubenswrapper[5010]: I1126 16:01:37.907552 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" path="/var/lib/kubelet/pods/ddaaa07f-c86c-438b-80c4-3da416ecba69/volumes" Nov 26 16:01:39 crc kubenswrapper[5010]: I1126 16:01:39.845086 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:39 crc kubenswrapper[5010]: I1126 16:01:39.899670 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:40 crc kubenswrapper[5010]: I1126 16:01:40.722112 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-7757b8b846-drzn5" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272060 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-l5cdd"] Nov 26 16:01:41 crc kubenswrapper[5010]: E1126 16:01:41.272444 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="registry-server" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272460 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="registry-server" Nov 26 16:01:41 crc kubenswrapper[5010]: E1126 16:01:41.272492 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="extract-content" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272500 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="extract-content" Nov 26 16:01:41 crc kubenswrapper[5010]: E1126 16:01:41.272515 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="extract-utilities" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272524 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="extract-utilities" Nov 26 16:01:41 crc kubenswrapper[5010]: E1126 16:01:41.272538 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="registry-server" Nov 26 
16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272546 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="registry-server" Nov 26 16:01:41 crc kubenswrapper[5010]: E1126 16:01:41.272562 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="extract-utilities" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272572 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="extract-utilities" Nov 26 16:01:41 crc kubenswrapper[5010]: E1126 16:01:41.272587 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="extract-content" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272595 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="extract-content" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272795 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddaaa07f-c86c-438b-80c4-3da416ecba69" containerName="registry-server" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.272839 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0482821b-77fb-44bf-8ea8-c4a3de5ff420" containerName="registry-server" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.274229 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.290920 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l5cdd"] Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.433586 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r97pt\" (UniqueName: \"kubernetes.io/projected/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-kube-api-access-r97pt\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.433667 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-utilities\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.433699 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-catalog-content\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.535092 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-utilities\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.535230 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-catalog-content\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.535415 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r97pt\" (UniqueName: \"kubernetes.io/projected/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-kube-api-access-r97pt\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.535892 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-utilities\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.536012 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-catalog-content\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.572971 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r97pt\" (UniqueName: \"kubernetes.io/projected/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-kube-api-access-r97pt\") pod \"certified-operators-l5cdd\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.593701 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.869818 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ptfb7"] Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.874949 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:41 crc kubenswrapper[5010]: I1126 16:01:41.888414 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptfb7"] Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.045930 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-658c9\" (UniqueName: \"kubernetes.io/projected/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-kube-api-access-658c9\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.046028 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-catalog-content\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.046157 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-utilities\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.075283 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-l5cdd"] Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.147958 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-utilities\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.148098 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-658c9\" (UniqueName: \"kubernetes.io/projected/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-kube-api-access-658c9\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.148150 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-catalog-content\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.148689 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-utilities\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.148730 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-catalog-content\") pod \"redhat-marketplace-ptfb7\" (UID: 
\"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.173895 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-658c9\" (UniqueName: \"kubernetes.io/projected/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-kube-api-access-658c9\") pod \"redhat-marketplace-ptfb7\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.199647 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.638051 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptfb7"] Nov 26 16:01:42 crc kubenswrapper[5010]: W1126 16:01:42.647674 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51bd722a_ecaf_42bf_973a_6d00e6e6c82b.slice/crio-66cde0dd3ca39da2fb704c6302695cad6c023c354e4f77e6896e3124e9b942a5 WatchSource:0}: Error finding container 66cde0dd3ca39da2fb704c6302695cad6c023c354e4f77e6896e3124e9b942a5: Status 404 returned error can't find the container with id 66cde0dd3ca39da2fb704c6302695cad6c023c354e4f77e6896e3124e9b942a5 Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.673472 5010 generic.go:334] "Generic (PLEG): container finished" podID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerID="76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39" exitCode=0 Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.673530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerDied","Data":"76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39"} Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.673578 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerStarted","Data":"9c51a6ff4fcc6db29791b6e44bf4383dbf643ad7c639329f871927ce6949ef20"} Nov 26 16:01:42 crc kubenswrapper[5010]: I1126 16:01:42.674779 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptfb7" event={"ID":"51bd722a-ecaf-42bf-973a-6d00e6e6c82b","Type":"ContainerStarted","Data":"66cde0dd3ca39da2fb704c6302695cad6c023c354e4f77e6896e3124e9b942a5"} Nov 26 16:01:43 crc kubenswrapper[5010]: I1126 16:01:43.684643 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerStarted","Data":"c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9"} Nov 26 16:01:43 crc kubenswrapper[5010]: I1126 16:01:43.687356 5010 generic.go:334] "Generic (PLEG): container finished" podID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerID="1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3" exitCode=0 Nov 26 16:01:43 crc kubenswrapper[5010]: I1126 16:01:43.687418 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptfb7" event={"ID":"51bd722a-ecaf-42bf-973a-6d00e6e6c82b","Type":"ContainerDied","Data":"1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3"} Nov 26 16:01:44 crc 
kubenswrapper[5010]: I1126 16:01:44.698062 5010 generic.go:334] "Generic (PLEG): container finished" podID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerID="c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9" exitCode=0 Nov 26 16:01:44 crc kubenswrapper[5010]: I1126 16:01:44.698111 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerDied","Data":"c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9"} Nov 26 16:01:45 crc kubenswrapper[5010]: I1126 16:01:45.707993 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerStarted","Data":"69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a"} Nov 26 16:01:45 crc kubenswrapper[5010]: I1126 16:01:45.755112 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-l5cdd" podStartSLOduration=2.305096477 podStartE2EDuration="4.755095517s" podCreationTimestamp="2025-11-26 16:01:41 +0000 UTC" firstStartedPulling="2025-11-26 16:01:42.675206615 +0000 UTC m=+2123.465923763" lastFinishedPulling="2025-11-26 16:01:45.125205655 +0000 UTC m=+2125.915922803" observedRunningTime="2025-11-26 16:01:45.749095437 +0000 UTC m=+2126.539812595" watchObservedRunningTime="2025-11-26 16:01:45.755095517 +0000 UTC m=+2126.545812665" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.059069 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cxn48"] Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.059810 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cxn48" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="registry-server" containerID="cri-o://8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07" gracePeriod=2 Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.532524 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.723380 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zmdws\" (UniqueName: \"kubernetes.io/projected/a36f5bdc-9a19-427a-be02-f04547306508-kube-api-access-zmdws\") pod \"a36f5bdc-9a19-427a-be02-f04547306508\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.723737 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-utilities\") pod \"a36f5bdc-9a19-427a-be02-f04547306508\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.723783 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-catalog-content\") pod \"a36f5bdc-9a19-427a-be02-f04547306508\" (UID: \"a36f5bdc-9a19-427a-be02-f04547306508\") " Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.725414 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-utilities" (OuterVolumeSpecName: "utilities") pod "a36f5bdc-9a19-427a-be02-f04547306508" (UID: "a36f5bdc-9a19-427a-be02-f04547306508"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.732085 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a36f5bdc-9a19-427a-be02-f04547306508-kube-api-access-zmdws" (OuterVolumeSpecName: "kube-api-access-zmdws") pod "a36f5bdc-9a19-427a-be02-f04547306508" (UID: "a36f5bdc-9a19-427a-be02-f04547306508"). InnerVolumeSpecName "kube-api-access-zmdws". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.733061 5010 generic.go:334] "Generic (PLEG): container finished" podID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerID="f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723" exitCode=0 Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.733133 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptfb7" event={"ID":"51bd722a-ecaf-42bf-973a-6d00e6e6c82b","Type":"ContainerDied","Data":"f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723"} Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.744455 5010 generic.go:334] "Generic (PLEG): container finished" podID="a36f5bdc-9a19-427a-be02-f04547306508" containerID="8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07" exitCode=0 Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.745534 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cxn48" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.745584 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerDied","Data":"8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07"} Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.745622 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cxn48" event={"ID":"a36f5bdc-9a19-427a-be02-f04547306508","Type":"ContainerDied","Data":"16f0dfd1c80d66ec10317c7f21f041e93cf27b557791b16bb98e4830db546286"} Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.745654 5010 scope.go:117] "RemoveContainer" containerID="8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.779486 5010 scope.go:117] "RemoveContainer" containerID="735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.803178 5010 scope.go:117] "RemoveContainer" containerID="09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.825113 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.825143 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zmdws\" (UniqueName: \"kubernetes.io/projected/a36f5bdc-9a19-427a-be02-f04547306508-kube-api-access-zmdws\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.832808 5010 scope.go:117] "RemoveContainer" containerID="8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07" Nov 26 16:01:46 crc kubenswrapper[5010]: E1126 16:01:46.833893 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07\": container with ID starting with 8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07 not found: ID does not exist" containerID="8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.833959 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07"} err="failed to get container status \"8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07\": rpc error: code = NotFound desc = could not find container \"8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07\": container with ID starting with 8cbb7bfdb21b26503278bcbd2665ea67122e8eea5f91bb30f049e1f29619de07 not found: ID does not exist" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.833999 5010 scope.go:117] "RemoveContainer" containerID="735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5" Nov 26 16:01:46 crc kubenswrapper[5010]: E1126 16:01:46.834580 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5\": container with ID starting with 
735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5 not found: ID does not exist" containerID="735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.834623 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5"} err="failed to get container status \"735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5\": rpc error: code = NotFound desc = could not find container \"735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5\": container with ID starting with 735ac5860bd3779d744d538824ed7ab0c12961f9c4ab44b673e5e44aa26a12f5 not found: ID does not exist" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.834640 5010 scope.go:117] "RemoveContainer" containerID="09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e" Nov 26 16:01:46 crc kubenswrapper[5010]: E1126 16:01:46.835125 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e\": container with ID starting with 09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e not found: ID does not exist" containerID="09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.835164 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e"} err="failed to get container status \"09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e\": rpc error: code = NotFound desc = could not find container \"09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e\": container with ID starting with 09c8a277ad536acfa7e7400dba8148c548e8e8a4827a005090cdf3d0cc86b70e not found: ID does not exist" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.837840 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a36f5bdc-9a19-427a-be02-f04547306508" (UID: "a36f5bdc-9a19-427a-be02-f04547306508"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:46 crc kubenswrapper[5010]: I1126 16:01:46.927286 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a36f5bdc-9a19-427a-be02-f04547306508-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:47 crc kubenswrapper[5010]: I1126 16:01:47.076763 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cxn48"] Nov 26 16:01:47 crc kubenswrapper[5010]: I1126 16:01:47.081898 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cxn48"] Nov 26 16:01:47 crc kubenswrapper[5010]: I1126 16:01:47.758074 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptfb7" event={"ID":"51bd722a-ecaf-42bf-973a-6d00e6e6c82b","Type":"ContainerStarted","Data":"42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0"} Nov 26 16:01:47 crc kubenswrapper[5010]: I1126 16:01:47.787627 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ptfb7" podStartSLOduration=3.28432776 podStartE2EDuration="6.787584833s" podCreationTimestamp="2025-11-26 16:01:41 +0000 UTC" firstStartedPulling="2025-11-26 16:01:43.688610452 +0000 UTC m=+2124.479327600" lastFinishedPulling="2025-11-26 16:01:47.191867525 +0000 UTC m=+2127.982584673" observedRunningTime="2025-11-26 16:01:47.78468296 +0000 UTC m=+2128.575400118" watchObservedRunningTime="2025-11-26 16:01:47.787584833 +0000 UTC m=+2128.578302021" Nov 26 16:01:47 crc kubenswrapper[5010]: I1126 16:01:47.900128 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a36f5bdc-9a19-427a-be02-f04547306508" path="/var/lib/kubelet/pods/a36f5bdc-9a19-427a-be02-f04547306508/volumes" Nov 26 16:01:51 crc kubenswrapper[5010]: I1126 16:01:51.594698 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:51 crc kubenswrapper[5010]: I1126 16:01:51.595031 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:51 crc kubenswrapper[5010]: I1126 16:01:51.664904 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:51 crc kubenswrapper[5010]: I1126 16:01:51.877500 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:52 crc kubenswrapper[5010]: I1126 16:01:52.200434 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:52 crc kubenswrapper[5010]: I1126 16:01:52.200500 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:52 crc kubenswrapper[5010]: I1126 16:01:52.287962 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:52 crc kubenswrapper[5010]: I1126 16:01:52.848513 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.474106 5010 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/redhat-operators-2r2t6"] Nov 26 16:01:53 crc kubenswrapper[5010]: E1126 16:01:53.474732 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="extract-content" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.474761 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="extract-content" Nov 26 16:01:53 crc kubenswrapper[5010]: E1126 16:01:53.474804 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="registry-server" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.474812 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="registry-server" Nov 26 16:01:53 crc kubenswrapper[5010]: E1126 16:01:53.474830 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="extract-utilities" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.474837 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="extract-utilities" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.475120 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a36f5bdc-9a19-427a-be02-f04547306508" containerName="registry-server" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.476180 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.486663 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2r2t6"] Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.650674 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-catalog-content\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.650835 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-utilities\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.650895 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7h8lz\" (UniqueName: \"kubernetes.io/projected/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-kube-api-access-7h8lz\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.654632 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptfb7"] Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.751616 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-catalog-content\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " 
pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.751704 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-utilities\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.751776 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7h8lz\" (UniqueName: \"kubernetes.io/projected/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-kube-api-access-7h8lz\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.752165 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-catalog-content\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.752445 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-utilities\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.771156 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7h8lz\" (UniqueName: \"kubernetes.io/projected/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-kube-api-access-7h8lz\") pod \"redhat-operators-2r2t6\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:53 crc kubenswrapper[5010]: I1126 16:01:53.861180 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:01:54 crc kubenswrapper[5010]: I1126 16:01:54.309362 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2r2t6"] Nov 26 16:01:54 crc kubenswrapper[5010]: I1126 16:01:54.810235 5010 generic.go:334] "Generic (PLEG): container finished" podID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerID="9514eb4164e109b5c32b5977082c81d8e6a99df5b215a9aac88b742143c53341" exitCode=0 Nov 26 16:01:54 crc kubenswrapper[5010]: I1126 16:01:54.810274 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerDied","Data":"9514eb4164e109b5c32b5977082c81d8e6a99df5b215a9aac88b742143c53341"} Nov 26 16:01:54 crc kubenswrapper[5010]: I1126 16:01:54.810313 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerStarted","Data":"79796b4ba4be96d824597b75e780e2db1775a5975aee5fbb8e19adeb0f4e685b"} Nov 26 16:01:54 crc kubenswrapper[5010]: I1126 16:01:54.810425 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ptfb7" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="registry-server" containerID="cri-o://42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0" gracePeriod=2 Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.231175 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.374169 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-utilities\") pod \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.374262 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-catalog-content\") pod \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.374323 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-658c9\" (UniqueName: \"kubernetes.io/projected/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-kube-api-access-658c9\") pod \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\" (UID: \"51bd722a-ecaf-42bf-973a-6d00e6e6c82b\") " Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.375901 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-utilities" (OuterVolumeSpecName: "utilities") pod "51bd722a-ecaf-42bf-973a-6d00e6e6c82b" (UID: "51bd722a-ecaf-42bf-973a-6d00e6e6c82b"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.380932 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-kube-api-access-658c9" (OuterVolumeSpecName: "kube-api-access-658c9") pod "51bd722a-ecaf-42bf-973a-6d00e6e6c82b" (UID: "51bd722a-ecaf-42bf-973a-6d00e6e6c82b"). InnerVolumeSpecName "kube-api-access-658c9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.394720 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "51bd722a-ecaf-42bf-973a-6d00e6e6c82b" (UID: "51bd722a-ecaf-42bf-973a-6d00e6e6c82b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.476031 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.476073 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.476088 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-658c9\" (UniqueName: \"kubernetes.io/projected/51bd722a-ecaf-42bf-973a-6d00e6e6c82b-kube-api-access-658c9\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.823101 5010 generic.go:334] "Generic (PLEG): container finished" podID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerID="42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0" exitCode=0 Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.823155 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptfb7" event={"ID":"51bd722a-ecaf-42bf-973a-6d00e6e6c82b","Type":"ContainerDied","Data":"42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0"} Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.823195 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ptfb7" event={"ID":"51bd722a-ecaf-42bf-973a-6d00e6e6c82b","Type":"ContainerDied","Data":"66cde0dd3ca39da2fb704c6302695cad6c023c354e4f77e6896e3124e9b942a5"} Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.823217 5010 scope.go:117] "RemoveContainer" containerID="42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.823358 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ptfb7" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.855543 5010 scope.go:117] "RemoveContainer" containerID="f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.861134 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l5cdd"] Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.861389 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-l5cdd" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="registry-server" containerID="cri-o://69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a" gracePeriod=2 Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.871648 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptfb7"] Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.880491 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ptfb7"] Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.894898 5010 scope.go:117] "RemoveContainer" containerID="1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3" Nov 26 16:01:55 crc kubenswrapper[5010]: I1126 16:01:55.902719 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" path="/var/lib/kubelet/pods/51bd722a-ecaf-42bf-973a-6d00e6e6c82b/volumes" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.092624 5010 scope.go:117] "RemoveContainer" containerID="42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0" Nov 26 16:01:56 crc kubenswrapper[5010]: E1126 16:01:56.093687 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0\": container with ID starting with 42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0 not found: ID does not exist" containerID="42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.093784 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0"} err="failed to get container status \"42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0\": rpc error: code = NotFound desc = could not find container \"42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0\": container with ID starting with 42b29e8246272638e74c5faf8408c85ee0963a6563ae48ae4d9a65562982b0b0 not found: ID does not exist" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.093811 5010 scope.go:117] "RemoveContainer" containerID="f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723" Nov 26 16:01:56 crc kubenswrapper[5010]: E1126 16:01:56.094182 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723\": container with ID starting with f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723 not found: ID does not exist" containerID="f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.094205 5010 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723"} err="failed to get container status \"f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723\": rpc error: code = NotFound desc = could not find container \"f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723\": container with ID starting with f8d05f725ca260935defea047dba7fa1da62baa2ecae3c37157f604098b6b723 not found: ID does not exist" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.094223 5010 scope.go:117] "RemoveContainer" containerID="1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3" Nov 26 16:01:56 crc kubenswrapper[5010]: E1126 16:01:56.094406 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3\": container with ID starting with 1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3 not found: ID does not exist" containerID="1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.094425 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3"} err="failed to get container status \"1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3\": rpc error: code = NotFound desc = could not find container \"1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3\": container with ID starting with 1f9ca3e5236f4c4c63274cfad1a0948907ab232e838e616c4da691b40bbe59b3 not found: ID does not exist" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.304013 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.401586 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r97pt\" (UniqueName: \"kubernetes.io/projected/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-kube-api-access-r97pt\") pod \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.401655 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-catalog-content\") pod \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.401681 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-utilities\") pod \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\" (UID: \"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f\") " Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.402791 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-utilities" (OuterVolumeSpecName: "utilities") pod "b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" (UID: "b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.405586 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-kube-api-access-r97pt" (OuterVolumeSpecName: "kube-api-access-r97pt") pod "b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" (UID: "b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f"). InnerVolumeSpecName "kube-api-access-r97pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.460342 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" (UID: "b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.502901 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r97pt\" (UniqueName: \"kubernetes.io/projected/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-kube-api-access-r97pt\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.502962 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.502986 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.833340 5010 generic.go:334] "Generic (PLEG): container finished" podID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerID="69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a" exitCode=0 Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.833408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerDied","Data":"69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a"} Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.833439 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-l5cdd" event={"ID":"b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f","Type":"ContainerDied","Data":"9c51a6ff4fcc6db29791b6e44bf4383dbf643ad7c639329f871927ce6949ef20"} Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.833459 5010 scope.go:117] "RemoveContainer" containerID="69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.833548 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-l5cdd" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.853526 5010 scope.go:117] "RemoveContainer" containerID="c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.867813 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-l5cdd"] Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.873491 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-l5cdd"] Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.889266 5010 scope.go:117] "RemoveContainer" containerID="76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.906353 5010 scope.go:117] "RemoveContainer" containerID="69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a" Nov 26 16:01:56 crc kubenswrapper[5010]: E1126 16:01:56.906821 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a\": container with ID starting with 69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a not found: ID does not exist" containerID="69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.906980 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a"} err="failed to get container status \"69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a\": rpc error: code = NotFound desc = could not find container \"69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a\": container with ID starting with 69662e8d57cb8a99bd10f4b4d5e878d436ac8024a1dfffebec39127a334d4a4a not found: ID does not exist" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.907111 5010 scope.go:117] "RemoveContainer" containerID="c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9" Nov 26 16:01:56 crc kubenswrapper[5010]: E1126 16:01:56.907504 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9\": container with ID starting with c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9 not found: ID does not exist" containerID="c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.907528 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9"} err="failed to get container status \"c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9\": rpc error: code = NotFound desc = could not find container \"c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9\": container with ID starting with c009789756b63a51830d7e179bdf963ca8763db27262e7ada1e48293b2828be9 not found: ID does not exist" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.907541 5010 scope.go:117] "RemoveContainer" containerID="76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39" Nov 26 16:01:56 crc kubenswrapper[5010]: E1126 16:01:56.907989 5010 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39\": container with ID starting with 76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39 not found: ID does not exist" containerID="76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39" Nov 26 16:01:56 crc kubenswrapper[5010]: I1126 16:01:56.908030 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39"} err="failed to get container status \"76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39\": rpc error: code = NotFound desc = could not find container \"76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39\": container with ID starting with 76ae7af12f186ddd18a55b249d607d540e869ba35ec5e93449d649051482fc39 not found: ID does not exist" Nov 26 16:01:57 crc kubenswrapper[5010]: I1126 16:01:57.862290 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerStarted","Data":"fb7f6a111f987f2e4fb7adbb7e4aa924d164e8679c5f88c9e84ae867cfb27195"} Nov 26 16:01:57 crc kubenswrapper[5010]: I1126 16:01:57.903514 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" path="/var/lib/kubelet/pods/b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f/volumes" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062404 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-r26tw"] Nov 26 16:01:58 crc kubenswrapper[5010]: E1126 16:01:58.062682 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="registry-server" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062693 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="registry-server" Nov 26 16:01:58 crc kubenswrapper[5010]: E1126 16:01:58.062733 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="extract-content" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062739 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="extract-content" Nov 26 16:01:58 crc kubenswrapper[5010]: E1126 16:01:58.062751 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="extract-utilities" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062761 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="extract-utilities" Nov 26 16:01:58 crc kubenswrapper[5010]: E1126 16:01:58.062773 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="registry-server" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062779 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="registry-server" Nov 26 16:01:58 crc kubenswrapper[5010]: E1126 16:01:58.062793 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="extract-utilities" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062798 5010 
state_mem.go:107] "Deleted CPUSet assignment" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="extract-utilities" Nov 26 16:01:58 crc kubenswrapper[5010]: E1126 16:01:58.062813 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="extract-content" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062818 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="extract-content" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062961 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3c7714d-d46a-43f2-b47d-b61c5b2b3d7f" containerName="registry-server" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.062975 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="51bd722a-ecaf-42bf-973a-6d00e6e6c82b" containerName="registry-server" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.063986 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.080777 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r26tw"] Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.126175 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-catalog-content\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.126266 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktcdg\" (UniqueName: \"kubernetes.io/projected/b1488022-08ea-4c5d-ac17-4245d2bd543c-kube-api-access-ktcdg\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.126331 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-utilities\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.229738 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-catalog-content\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.229819 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktcdg\" (UniqueName: \"kubernetes.io/projected/b1488022-08ea-4c5d-ac17-4245d2bd543c-kube-api-access-ktcdg\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.229858 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-utilities\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.230300 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-catalog-content\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.230340 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-utilities\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.272264 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktcdg\" (UniqueName: \"kubernetes.io/projected/b1488022-08ea-4c5d-ac17-4245d2bd543c-kube-api-access-ktcdg\") pod \"redhat-marketplace-r26tw\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.387380 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.848239 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-r26tw"] Nov 26 16:01:58 crc kubenswrapper[5010]: W1126 16:01:58.853492 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1488022_08ea_4c5d_ac17_4245d2bd543c.slice/crio-e8e7b8f3bc9363aa32788bea022b800c81fdd09b68bad352003c7eb665b541f2 WatchSource:0}: Error finding container e8e7b8f3bc9363aa32788bea022b800c81fdd09b68bad352003c7eb665b541f2: Status 404 returned error can't find the container with id e8e7b8f3bc9363aa32788bea022b800c81fdd09b68bad352003c7eb665b541f2 Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.873596 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerStarted","Data":"e8e7b8f3bc9363aa32788bea022b800c81fdd09b68bad352003c7eb665b541f2"} Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.875494 5010 generic.go:334] "Generic (PLEG): container finished" podID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerID="fb7f6a111f987f2e4fb7adbb7e4aa924d164e8679c5f88c9e84ae867cfb27195" exitCode=0 Nov 26 16:01:58 crc kubenswrapper[5010]: I1126 16:01:58.875562 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerDied","Data":"fb7f6a111f987f2e4fb7adbb7e4aa924d164e8679c5f88c9e84ae867cfb27195"} Nov 26 16:01:59 crc kubenswrapper[5010]: I1126 16:01:59.887544 5010 generic.go:334] "Generic (PLEG): container finished" podID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerID="fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811" exitCode=0 Nov 26 16:01:59 crc kubenswrapper[5010]: I1126 16:01:59.887592 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerDied","Data":"fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811"} Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.262806 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6m9x5"] Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.264198 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.281918 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6m9x5"] Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.364319 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-utilities\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.364430 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jkqj\" (UniqueName: \"kubernetes.io/projected/485a9756-150d-4d43-b0c9-981705bec296-kube-api-access-6jkqj\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.364674 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-catalog-content\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.466110 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-utilities\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.466157 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jkqj\" (UniqueName: \"kubernetes.io/projected/485a9756-150d-4d43-b0c9-981705bec296-kube-api-access-6jkqj\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.466197 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-catalog-content\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.466951 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-catalog-content\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " 
pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.466963 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-utilities\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.492219 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jkqj\" (UniqueName: \"kubernetes.io/projected/485a9756-150d-4d43-b0c9-981705bec296-kube-api-access-6jkqj\") pod \"certified-operators-6m9x5\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.583809 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:00 crc kubenswrapper[5010]: I1126 16:02:00.898062 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerStarted","Data":"ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3"} Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.057102 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6m9x5"] Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.905635 5010 generic.go:334] "Generic (PLEG): container finished" podID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerID="ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3" exitCode=0 Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.905690 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerDied","Data":"ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3"} Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.908219 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerStarted","Data":"7513293663b4c6ca0e0c5e0ffd97f6124876cf236799c6113679fbe13d05edc6"} Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.909826 5010 generic.go:334] "Generic (PLEG): container finished" podID="485a9756-150d-4d43-b0c9-981705bec296" containerID="90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec" exitCode=0 Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.909868 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerDied","Data":"90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec"} Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.909898 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerStarted","Data":"7630fb483ea996c3cab1f8b96acb3ba3bd89faa867895bce37545cc3a325ed07"} Nov 26 16:02:01 crc kubenswrapper[5010]: I1126 16:02:01.988927 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2r2t6" podStartSLOduration=2.866575966 
podStartE2EDuration="8.988907411s" podCreationTimestamp="2025-11-26 16:01:53 +0000 UTC" firstStartedPulling="2025-11-26 16:01:54.811581153 +0000 UTC m=+2135.602298301" lastFinishedPulling="2025-11-26 16:02:00.933912598 +0000 UTC m=+2141.724629746" observedRunningTime="2025-11-26 16:02:01.971175199 +0000 UTC m=+2142.761892347" watchObservedRunningTime="2025-11-26 16:02:01.988907411 +0000 UTC m=+2142.779624559" Nov 26 16:02:02 crc kubenswrapper[5010]: I1126 16:02:02.920373 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerStarted","Data":"887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793"} Nov 26 16:02:02 crc kubenswrapper[5010]: I1126 16:02:02.944097 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-r26tw" podStartSLOduration=2.473462284 podStartE2EDuration="4.944076962s" podCreationTimestamp="2025-11-26 16:01:58 +0000 UTC" firstStartedPulling="2025-11-26 16:01:59.889441467 +0000 UTC m=+2140.680158615" lastFinishedPulling="2025-11-26 16:02:02.360056145 +0000 UTC m=+2143.150773293" observedRunningTime="2025-11-26 16:02:02.937691053 +0000 UTC m=+2143.728408201" watchObservedRunningTime="2025-11-26 16:02:02.944076962 +0000 UTC m=+2143.734794110" Nov 26 16:02:03 crc kubenswrapper[5010]: I1126 16:02:03.861468 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:02:03 crc kubenswrapper[5010]: I1126 16:02:03.861888 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:02:03 crc kubenswrapper[5010]: I1126 16:02:03.930133 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerStarted","Data":"bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33"} Nov 26 16:02:04 crc kubenswrapper[5010]: I1126 16:02:04.906146 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-2r2t6" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="registry-server" probeResult="failure" output=< Nov 26 16:02:04 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 16:02:04 crc kubenswrapper[5010]: > Nov 26 16:02:04 crc kubenswrapper[5010]: I1126 16:02:04.938940 5010 generic.go:334] "Generic (PLEG): container finished" podID="485a9756-150d-4d43-b0c9-981705bec296" containerID="bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33" exitCode=0 Nov 26 16:02:04 crc kubenswrapper[5010]: I1126 16:02:04.938998 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerDied","Data":"bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33"} Nov 26 16:02:06 crc kubenswrapper[5010]: I1126 16:02:06.979207 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerStarted","Data":"092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d"} Nov 26 16:02:07 crc kubenswrapper[5010]: I1126 16:02:07.003599 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-6m9x5" podStartSLOduration=2.788714435 podStartE2EDuration="7.003584739s" podCreationTimestamp="2025-11-26 16:02:00 +0000 UTC" firstStartedPulling="2025-11-26 16:02:01.911071749 +0000 UTC m=+2142.701788897" lastFinishedPulling="2025-11-26 16:02:06.125942043 +0000 UTC m=+2146.916659201" observedRunningTime="2025-11-26 16:02:07.000222605 +0000 UTC m=+2147.790939783" watchObservedRunningTime="2025-11-26 16:02:07.003584739 +0000 UTC m=+2147.794301887" Nov 26 16:02:08 crc kubenswrapper[5010]: I1126 16:02:08.387733 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:02:08 crc kubenswrapper[5010]: I1126 16:02:08.387774 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:02:08 crc kubenswrapper[5010]: I1126 16:02:08.453877 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:02:09 crc kubenswrapper[5010]: I1126 16:02:09.060611 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:02:09 crc kubenswrapper[5010]: I1126 16:02:09.860205 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r26tw"] Nov 26 16:02:10 crc kubenswrapper[5010]: I1126 16:02:10.584102 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:10 crc kubenswrapper[5010]: I1126 16:02:10.584406 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:10 crc kubenswrapper[5010]: I1126 16:02:10.630776 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.011476 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-r26tw" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="registry-server" containerID="cri-o://887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793" gracePeriod=2 Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.094214 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.425494 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.425934 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.514612 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.547103 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-utilities\") pod \"b1488022-08ea-4c5d-ac17-4245d2bd543c\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.547233 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktcdg\" (UniqueName: \"kubernetes.io/projected/b1488022-08ea-4c5d-ac17-4245d2bd543c-kube-api-access-ktcdg\") pod \"b1488022-08ea-4c5d-ac17-4245d2bd543c\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.547263 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-catalog-content\") pod \"b1488022-08ea-4c5d-ac17-4245d2bd543c\" (UID: \"b1488022-08ea-4c5d-ac17-4245d2bd543c\") " Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.548449 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-utilities" (OuterVolumeSpecName: "utilities") pod "b1488022-08ea-4c5d-ac17-4245d2bd543c" (UID: "b1488022-08ea-4c5d-ac17-4245d2bd543c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.556141 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1488022-08ea-4c5d-ac17-4245d2bd543c-kube-api-access-ktcdg" (OuterVolumeSpecName: "kube-api-access-ktcdg") pod "b1488022-08ea-4c5d-ac17-4245d2bd543c" (UID: "b1488022-08ea-4c5d-ac17-4245d2bd543c"). InnerVolumeSpecName "kube-api-access-ktcdg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.565160 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b1488022-08ea-4c5d-ac17-4245d2bd543c" (UID: "b1488022-08ea-4c5d-ac17-4245d2bd543c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.648932 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktcdg\" (UniqueName: \"kubernetes.io/projected/b1488022-08ea-4c5d-ac17-4245d2bd543c-kube-api-access-ktcdg\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.648999 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:11 crc kubenswrapper[5010]: I1126 16:02:11.649017 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1488022-08ea-4c5d-ac17-4245d2bd543c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.023569 5010 generic.go:334] "Generic (PLEG): container finished" podID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerID="887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793" exitCode=0 Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.024678 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-r26tw" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.024807 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerDied","Data":"887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793"} Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.024850 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-r26tw" event={"ID":"b1488022-08ea-4c5d-ac17-4245d2bd543c","Type":"ContainerDied","Data":"e8e7b8f3bc9363aa32788bea022b800c81fdd09b68bad352003c7eb665b541f2"} Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.024877 5010 scope.go:117] "RemoveContainer" containerID="887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.056455 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-r26tw"] Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.058152 5010 scope.go:117] "RemoveContainer" containerID="ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.063954 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-r26tw"] Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.077602 5010 scope.go:117] "RemoveContainer" containerID="fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.114991 5010 scope.go:117] "RemoveContainer" containerID="887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793" Nov 26 16:02:12 crc kubenswrapper[5010]: E1126 16:02:12.115517 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793\": container with ID starting with 887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793 not found: ID does not exist" containerID="887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.115578 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793"} err="failed to get container status \"887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793\": rpc error: code = NotFound desc = could not find container \"887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793\": container with ID starting with 887be285f3046aeeb67fee00333369c6da7d834db6bba172c7d6483020041793 not found: ID does not exist" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.115612 5010 scope.go:117] "RemoveContainer" containerID="ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3" Nov 26 16:02:12 crc kubenswrapper[5010]: E1126 16:02:12.115906 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3\": container with ID starting with ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3 not found: ID does not exist" containerID="ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.115934 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3"} err="failed to get container status \"ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3\": rpc error: code = NotFound desc = could not find container \"ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3\": container with ID starting with ca331abf778a72430d17f097a99304eac5d80a790f923f5bff3aeb8b40f289d3 not found: ID does not exist" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.115955 5010 scope.go:117] "RemoveContainer" containerID="fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811" Nov 26 16:02:12 crc kubenswrapper[5010]: E1126 16:02:12.116307 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811\": container with ID starting with fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811 not found: ID does not exist" containerID="fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811" Nov 26 16:02:12 crc kubenswrapper[5010]: I1126 16:02:12.116372 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811"} err="failed to get container status \"fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811\": rpc error: code = NotFound desc = could not find container \"fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811\": container with ID starting with fa057ca55c3fafa74ad277f539e35be5961c57edfd2fc6815dcba2dda7c01811 not found: ID does not exist" Nov 26 16:02:13 crc kubenswrapper[5010]: I1126 16:02:13.461580 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6m9x5"] Nov 26 16:02:13 crc kubenswrapper[5010]: I1126 16:02:13.901039 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" path="/var/lib/kubelet/pods/b1488022-08ea-4c5d-ac17-4245d2bd543c/volumes" Nov 26 16:02:13 crc kubenswrapper[5010]: I1126 16:02:13.932550 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:02:13 crc kubenswrapper[5010]: I1126 16:02:13.996880 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.041861 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6m9x5" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="registry-server" containerID="cri-o://092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d" gracePeriod=2 Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.494241 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.592951 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jkqj\" (UniqueName: \"kubernetes.io/projected/485a9756-150d-4d43-b0c9-981705bec296-kube-api-access-6jkqj\") pod \"485a9756-150d-4d43-b0c9-981705bec296\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.593029 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-utilities\") pod \"485a9756-150d-4d43-b0c9-981705bec296\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.593073 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-catalog-content\") pod \"485a9756-150d-4d43-b0c9-981705bec296\" (UID: \"485a9756-150d-4d43-b0c9-981705bec296\") " Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.594963 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-utilities" (OuterVolumeSpecName: "utilities") pod "485a9756-150d-4d43-b0c9-981705bec296" (UID: "485a9756-150d-4d43-b0c9-981705bec296"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.611685 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/485a9756-150d-4d43-b0c9-981705bec296-kube-api-access-6jkqj" (OuterVolumeSpecName: "kube-api-access-6jkqj") pod "485a9756-150d-4d43-b0c9-981705bec296" (UID: "485a9756-150d-4d43-b0c9-981705bec296"). InnerVolumeSpecName "kube-api-access-6jkqj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.695319 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jkqj\" (UniqueName: \"kubernetes.io/projected/485a9756-150d-4d43-b0c9-981705bec296-kube-api-access-6jkqj\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.695371 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.887999 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xmzqt"] Nov 26 16:02:14 crc kubenswrapper[5010]: E1126 16:02:14.888501 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="extract-content" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.888527 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="extract-content" Nov 26 16:02:14 crc kubenswrapper[5010]: E1126 16:02:14.888563 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="registry-server" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.888577 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="registry-server" Nov 26 16:02:14 crc kubenswrapper[5010]: E1126 16:02:14.888596 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="extract-utilities" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.888610 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="485a9756-150d-4d43-b0c9-981705bec296" containerName="extract-utilities" Nov 26 16:02:14 crc kubenswrapper[5010]: E1126 16:02:14.888641 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="extract-utilities" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.888653 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="extract-utilities" Nov 26 16:02:14 crc kubenswrapper[5010]: E1126 16:02:14.888690 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="registry-server" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.888702 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="registry-server" Nov 26 16:02:14 crc kubenswrapper[5010]: E1126 16:02:14.888751 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="extract-content" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.888764 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="extract-content" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.889008 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1488022-08ea-4c5d-ac17-4245d2bd543c" containerName="registry-server" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.889026 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="485a9756-150d-4d43-b0c9-981705bec296" 
containerName="registry-server" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.891401 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:14 crc kubenswrapper[5010]: I1126 16:02:14.899353 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmzqt"] Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.001563 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlhnc\" (UniqueName: \"kubernetes.io/projected/4ba47c17-c309-478f-b97c-860a0926227b-kube-api-access-mlhnc\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.001651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-utilities\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.001703 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-catalog-content\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.053984 5010 generic.go:334] "Generic (PLEG): container finished" podID="485a9756-150d-4d43-b0c9-981705bec296" containerID="092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d" exitCode=0 Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.054033 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerDied","Data":"092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d"} Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.054071 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6m9x5" event={"ID":"485a9756-150d-4d43-b0c9-981705bec296","Type":"ContainerDied","Data":"7630fb483ea996c3cab1f8b96acb3ba3bd89faa867895bce37545cc3a325ed07"} Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.054093 5010 scope.go:117] "RemoveContainer" containerID="092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.054094 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6m9x5" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.074591 5010 scope.go:117] "RemoveContainer" containerID="bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.093610 5010 scope.go:117] "RemoveContainer" containerID="90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.102692 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-utilities\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.102775 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-catalog-content\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.102858 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlhnc\" (UniqueName: \"kubernetes.io/projected/4ba47c17-c309-478f-b97c-860a0926227b-kube-api-access-mlhnc\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.103351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-catalog-content\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.103458 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-utilities\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.124088 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlhnc\" (UniqueName: \"kubernetes.io/projected/4ba47c17-c309-478f-b97c-860a0926227b-kube-api-access-mlhnc\") pod \"redhat-marketplace-xmzqt\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.138658 5010 scope.go:117] "RemoveContainer" containerID="092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d" Nov 26 16:02:15 crc kubenswrapper[5010]: E1126 16:02:15.139255 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d\": container with ID starting with 092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d not found: ID does not exist" containerID="092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.139291 5010 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d"} err="failed to get container status \"092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d\": rpc error: code = NotFound desc = could not find container \"092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d\": container with ID starting with 092dc4011b46d895859ad3d47df904b423c96838a607c28e56623ed4ec8b298d not found: ID does not exist" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.139311 5010 scope.go:117] "RemoveContainer" containerID="bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33" Nov 26 16:02:15 crc kubenswrapper[5010]: E1126 16:02:15.139619 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33\": container with ID starting with bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33 not found: ID does not exist" containerID="bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.139642 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33"} err="failed to get container status \"bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33\": rpc error: code = NotFound desc = could not find container \"bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33\": container with ID starting with bbc51e6db2bc5641aef39f05961bc84ebc3ae2e3235599ecb41ec3b17fb91c33 not found: ID does not exist" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.139657 5010 scope.go:117] "RemoveContainer" containerID="90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec" Nov 26 16:02:15 crc kubenswrapper[5010]: E1126 16:02:15.140016 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec\": container with ID starting with 90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec not found: ID does not exist" containerID="90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.140069 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec"} err="failed to get container status \"90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec\": rpc error: code = NotFound desc = could not find container \"90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec\": container with ID starting with 90f4bcf168be25e748ba87195355fd50d9ebb2c914fc56648086149a3bc3d8ec not found: ID does not exist" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.219496 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.223327 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "485a9756-150d-4d43-b0c9-981705bec296" (UID: "485a9756-150d-4d43-b0c9-981705bec296"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.305739 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/485a9756-150d-4d43-b0c9-981705bec296-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.386638 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6m9x5"] Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.394072 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6m9x5"] Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.622772 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmzqt"] Nov 26 16:02:15 crc kubenswrapper[5010]: I1126 16:02:15.901350 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="485a9756-150d-4d43-b0c9-981705bec296" path="/var/lib/kubelet/pods/485a9756-150d-4d43-b0c9-981705bec296/volumes" Nov 26 16:02:16 crc kubenswrapper[5010]: I1126 16:02:16.064060 5010 generic.go:334] "Generic (PLEG): container finished" podID="4ba47c17-c309-478f-b97c-860a0926227b" containerID="a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2" exitCode=0 Nov 26 16:02:16 crc kubenswrapper[5010]: I1126 16:02:16.064169 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmzqt" event={"ID":"4ba47c17-c309-478f-b97c-860a0926227b","Type":"ContainerDied","Data":"a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2"} Nov 26 16:02:16 crc kubenswrapper[5010]: I1126 16:02:16.064200 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmzqt" event={"ID":"4ba47c17-c309-478f-b97c-860a0926227b","Type":"ContainerStarted","Data":"a5ab8d2a6bea3a8b91ba13b52787b3c528f144453a6eecc15bf7fa5d95c1c0f4"} Nov 26 16:02:16 crc kubenswrapper[5010]: I1126 16:02:16.067630 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:02:18 crc kubenswrapper[5010]: I1126 16:02:18.853670 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2r2t6"] Nov 26 16:02:18 crc kubenswrapper[5010]: I1126 16:02:18.854325 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2r2t6" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="registry-server" containerID="cri-o://7513293663b4c6ca0e0c5e0ffd97f6124876cf236799c6113679fbe13d05edc6" gracePeriod=2 Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.093666 5010 generic.go:334] "Generic (PLEG): container finished" podID="4ba47c17-c309-478f-b97c-860a0926227b" containerID="4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c" exitCode=0 Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.093753 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmzqt" event={"ID":"4ba47c17-c309-478f-b97c-860a0926227b","Type":"ContainerDied","Data":"4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c"} Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.096682 5010 generic.go:334] "Generic (PLEG): container finished" podID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerID="7513293663b4c6ca0e0c5e0ffd97f6124876cf236799c6113679fbe13d05edc6" exitCode=0 Nov 26 16:02:19 crc 
kubenswrapper[5010]: I1126 16:02:19.096799 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerDied","Data":"7513293663b4c6ca0e0c5e0ffd97f6124876cf236799c6113679fbe13d05edc6"} Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.241843 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.259452 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-utilities\") pod \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.259521 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-catalog-content\") pod \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.259585 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7h8lz\" (UniqueName: \"kubernetes.io/projected/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-kube-api-access-7h8lz\") pod \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\" (UID: \"fb39991b-2acf-4bbf-a02c-8d8656fcb79d\") " Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.260848 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-utilities" (OuterVolumeSpecName: "utilities") pod "fb39991b-2acf-4bbf-a02c-8d8656fcb79d" (UID: "fb39991b-2acf-4bbf-a02c-8d8656fcb79d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.266784 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-kube-api-access-7h8lz" (OuterVolumeSpecName: "kube-api-access-7h8lz") pod "fb39991b-2acf-4bbf-a02c-8d8656fcb79d" (UID: "fb39991b-2acf-4bbf-a02c-8d8656fcb79d"). InnerVolumeSpecName "kube-api-access-7h8lz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.359509 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fb39991b-2acf-4bbf-a02c-8d8656fcb79d" (UID: "fb39991b-2acf-4bbf-a02c-8d8656fcb79d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.369761 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.369809 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:19 crc kubenswrapper[5010]: I1126 16:02:19.369849 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7h8lz\" (UniqueName: \"kubernetes.io/projected/fb39991b-2acf-4bbf-a02c-8d8656fcb79d-kube-api-access-7h8lz\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.105880 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2r2t6" event={"ID":"fb39991b-2acf-4bbf-a02c-8d8656fcb79d","Type":"ContainerDied","Data":"79796b4ba4be96d824597b75e780e2db1775a5975aee5fbb8e19adeb0f4e685b"} Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.106126 5010 scope.go:117] "RemoveContainer" containerID="7513293663b4c6ca0e0c5e0ffd97f6124876cf236799c6113679fbe13d05edc6" Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.106251 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2r2t6" Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.110207 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmzqt" event={"ID":"4ba47c17-c309-478f-b97c-860a0926227b","Type":"ContainerStarted","Data":"55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d"} Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.122129 5010 scope.go:117] "RemoveContainer" containerID="fb7f6a111f987f2e4fb7adbb7e4aa924d164e8679c5f88c9e84ae867cfb27195" Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.140483 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xmzqt" podStartSLOduration=2.5323095799999997 podStartE2EDuration="6.140468701s" podCreationTimestamp="2025-11-26 16:02:14 +0000 UTC" firstStartedPulling="2025-11-26 16:02:16.067358244 +0000 UTC m=+2156.858075402" lastFinishedPulling="2025-11-26 16:02:19.675517385 +0000 UTC m=+2160.466234523" observedRunningTime="2025-11-26 16:02:20.138252265 +0000 UTC m=+2160.928969413" watchObservedRunningTime="2025-11-26 16:02:20.140468701 +0000 UTC m=+2160.931185849" Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.143316 5010 scope.go:117] "RemoveContainer" containerID="9514eb4164e109b5c32b5977082c81d8e6a99df5b215a9aac88b742143c53341" Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.162407 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2r2t6"] Nov 26 16:02:20 crc kubenswrapper[5010]: I1126 16:02:20.170692 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2r2t6"] Nov 26 16:02:21 crc kubenswrapper[5010]: I1126 16:02:21.905097 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" path="/var/lib/kubelet/pods/fb39991b-2acf-4bbf-a02c-8d8656fcb79d/volumes" Nov 26 16:02:25 crc kubenswrapper[5010]: I1126 16:02:25.220038 5010 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:25 crc kubenswrapper[5010]: I1126 16:02:25.220458 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:25 crc kubenswrapper[5010]: I1126 16:02:25.296227 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:26 crc kubenswrapper[5010]: I1126 16:02:26.202274 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:26 crc kubenswrapper[5010]: I1126 16:02:26.661835 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmzqt"] Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.168104 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xmzqt" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="registry-server" containerID="cri-o://55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d" gracePeriod=2 Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.667084 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.839782 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-catalog-content\") pod \"4ba47c17-c309-478f-b97c-860a0926227b\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.839872 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-utilities\") pod \"4ba47c17-c309-478f-b97c-860a0926227b\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.839943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlhnc\" (UniqueName: \"kubernetes.io/projected/4ba47c17-c309-478f-b97c-860a0926227b-kube-api-access-mlhnc\") pod \"4ba47c17-c309-478f-b97c-860a0926227b\" (UID: \"4ba47c17-c309-478f-b97c-860a0926227b\") " Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.841319 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-utilities" (OuterVolumeSpecName: "utilities") pod "4ba47c17-c309-478f-b97c-860a0926227b" (UID: "4ba47c17-c309-478f-b97c-860a0926227b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.846271 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ba47c17-c309-478f-b97c-860a0926227b-kube-api-access-mlhnc" (OuterVolumeSpecName: "kube-api-access-mlhnc") pod "4ba47c17-c309-478f-b97c-860a0926227b" (UID: "4ba47c17-c309-478f-b97c-860a0926227b"). InnerVolumeSpecName "kube-api-access-mlhnc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.863199 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ba47c17-c309-478f-b97c-860a0926227b" (UID: "4ba47c17-c309-478f-b97c-860a0926227b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.942214 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.942269 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ba47c17-c309-478f-b97c-860a0926227b-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:28 crc kubenswrapper[5010]: I1126 16:02:28.942292 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlhnc\" (UniqueName: \"kubernetes.io/projected/4ba47c17-c309-478f-b97c-860a0926227b-kube-api-access-mlhnc\") on node \"crc\" DevicePath \"\"" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.180096 5010 generic.go:334] "Generic (PLEG): container finished" podID="4ba47c17-c309-478f-b97c-860a0926227b" containerID="55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d" exitCode=0 Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.180161 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmzqt" event={"ID":"4ba47c17-c309-478f-b97c-860a0926227b","Type":"ContainerDied","Data":"55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d"} Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.180193 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xmzqt" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.180204 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xmzqt" event={"ID":"4ba47c17-c309-478f-b97c-860a0926227b","Type":"ContainerDied","Data":"a5ab8d2a6bea3a8b91ba13b52787b3c528f144453a6eecc15bf7fa5d95c1c0f4"} Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.180222 5010 scope.go:117] "RemoveContainer" containerID="55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.204008 5010 scope.go:117] "RemoveContainer" containerID="4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.247296 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmzqt"] Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.247751 5010 scope.go:117] "RemoveContainer" containerID="a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.259890 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xmzqt"] Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.268041 5010 scope.go:117] "RemoveContainer" containerID="55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d" Nov 26 16:02:29 crc kubenswrapper[5010]: E1126 16:02:29.268689 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d\": container with ID starting with 55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d not found: ID does not exist" containerID="55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.268747 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d"} err="failed to get container status \"55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d\": rpc error: code = NotFound desc = could not find container \"55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d\": container with ID starting with 55996c4ef6577e2ca0794e750c6b2a75f605abc9a744afbb89100c0a37c58b9d not found: ID does not exist" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.268777 5010 scope.go:117] "RemoveContainer" containerID="4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c" Nov 26 16:02:29 crc kubenswrapper[5010]: E1126 16:02:29.269216 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c\": container with ID starting with 4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c not found: ID does not exist" containerID="4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.269237 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c"} err="failed to get container status \"4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c\": rpc error: code = NotFound desc = could not find 
container \"4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c\": container with ID starting with 4219316425a98199c12785e0915f5552fc5a195bef0856cf0f7b0c7b02502c2c not found: ID does not exist" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.269257 5010 scope.go:117] "RemoveContainer" containerID="a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2" Nov 26 16:02:29 crc kubenswrapper[5010]: E1126 16:02:29.269500 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2\": container with ID starting with a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2 not found: ID does not exist" containerID="a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.269535 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2"} err="failed to get container status \"a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2\": rpc error: code = NotFound desc = could not find container \"a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2\": container with ID starting with a752b5d9d7cd27977553dfc240ce976ab1a3df5f2f0186a13114b8d6fa90caf2 not found: ID does not exist" Nov 26 16:02:29 crc kubenswrapper[5010]: I1126 16:02:29.904902 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ba47c17-c309-478f-b97c-860a0926227b" path="/var/lib/kubelet/pods/4ba47c17-c309-478f-b97c-860a0926227b/volumes" Nov 26 16:02:41 crc kubenswrapper[5010]: I1126 16:02:41.422475 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:02:41 crc kubenswrapper[5010]: I1126 16:02:41.423067 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:03:11 crc kubenswrapper[5010]: I1126 16:03:11.422692 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:03:11 crc kubenswrapper[5010]: I1126 16:03:11.423377 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:03:11 crc kubenswrapper[5010]: I1126 16:03:11.423434 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:03:11 crc kubenswrapper[5010]: I1126 16:03:11.424292 5010 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:03:11 crc kubenswrapper[5010]: I1126 16:03:11.424392 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" gracePeriod=600 Nov 26 16:03:12 crc kubenswrapper[5010]: E1126 16:03:12.076442 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:03:12 crc kubenswrapper[5010]: I1126 16:03:12.594448 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" exitCode=0 Nov 26 16:03:12 crc kubenswrapper[5010]: I1126 16:03:12.594536 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f"} Nov 26 16:03:12 crc kubenswrapper[5010]: I1126 16:03:12.594631 5010 scope.go:117] "RemoveContainer" containerID="238c759f629d5116acbbb07eb94cd9109f06028f6b7ad27094247fc4ae32e555" Nov 26 16:03:12 crc kubenswrapper[5010]: I1126 16:03:12.596383 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:03:12 crc kubenswrapper[5010]: E1126 16:03:12.596970 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:03:24 crc kubenswrapper[5010]: I1126 16:03:24.892272 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:03:24 crc kubenswrapper[5010]: E1126 16:03:24.893379 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:03:37 crc kubenswrapper[5010]: I1126 16:03:37.892045 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:03:37 crc kubenswrapper[5010]: E1126 16:03:37.892997 5010 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:03:49 crc kubenswrapper[5010]: I1126 16:03:49.898015 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:03:49 crc kubenswrapper[5010]: E1126 16:03:49.898973 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:04:03 crc kubenswrapper[5010]: I1126 16:04:03.892631 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:04:03 crc kubenswrapper[5010]: E1126 16:04:03.895021 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:04:18 crc kubenswrapper[5010]: I1126 16:04:18.892000 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:04:18 crc kubenswrapper[5010]: E1126 16:04:18.893309 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:04:32 crc kubenswrapper[5010]: I1126 16:04:32.892136 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:04:32 crc kubenswrapper[5010]: E1126 16:04:32.893019 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:04:44 crc kubenswrapper[5010]: I1126 16:04:44.891676 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:04:44 crc kubenswrapper[5010]: E1126 16:04:44.892209 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:04:57 crc kubenswrapper[5010]: I1126 16:04:57.892580 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:04:57 crc kubenswrapper[5010]: E1126 16:04:57.896161 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:05:08 crc kubenswrapper[5010]: I1126 16:05:08.892025 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:05:08 crc kubenswrapper[5010]: E1126 16:05:08.893270 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:05:22 crc kubenswrapper[5010]: I1126 16:05:22.892844 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:05:22 crc kubenswrapper[5010]: E1126 16:05:22.893790 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:05:36 crc kubenswrapper[5010]: I1126 16:05:36.891186 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:05:36 crc kubenswrapper[5010]: E1126 16:05:36.891882 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:05:48 crc kubenswrapper[5010]: I1126 16:05:48.891876 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:05:48 crc kubenswrapper[5010]: E1126 16:05:48.894691 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:05:59 crc kubenswrapper[5010]: I1126 16:05:59.900559 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:05:59 crc kubenswrapper[5010]: E1126 16:05:59.901556 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:06:13 crc kubenswrapper[5010]: I1126 16:06:13.891973 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:06:13 crc kubenswrapper[5010]: E1126 16:06:13.892971 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:06:26 crc kubenswrapper[5010]: I1126 16:06:26.892527 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:06:26 crc kubenswrapper[5010]: E1126 16:06:26.893670 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:06:37 crc kubenswrapper[5010]: I1126 16:06:37.892056 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:06:37 crc kubenswrapper[5010]: E1126 16:06:37.892871 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:06:51 crc kubenswrapper[5010]: I1126 16:06:51.892159 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:06:51 crc kubenswrapper[5010]: E1126 16:06:51.893143 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:07:03 crc kubenswrapper[5010]: I1126 16:07:03.891686 5010 
scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:07:03 crc kubenswrapper[5010]: E1126 16:07:03.892864 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.238333 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-htztz"] Nov 26 16:07:13 crc kubenswrapper[5010]: E1126 16:07:13.239241 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="extract-content" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239254 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="extract-content" Nov 26 16:07:13 crc kubenswrapper[5010]: E1126 16:07:13.239266 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="registry-server" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239272 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="registry-server" Nov 26 16:07:13 crc kubenswrapper[5010]: E1126 16:07:13.239285 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="extract-content" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239291 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="extract-content" Nov 26 16:07:13 crc kubenswrapper[5010]: E1126 16:07:13.239304 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="extract-utilities" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239310 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="extract-utilities" Nov 26 16:07:13 crc kubenswrapper[5010]: E1126 16:07:13.239322 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="extract-utilities" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239328 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ba47c17-c309-478f-b97c-860a0926227b" containerName="extract-utilities" Nov 26 16:07:13 crc kubenswrapper[5010]: E1126 16:07:13.239340 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="registry-server" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239346 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="registry-server" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239485 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb39991b-2acf-4bbf-a02c-8d8656fcb79d" containerName="registry-server" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.239503 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ba47c17-c309-478f-b97c-860a0926227b" 
containerName="registry-server" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.240511 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.277419 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-htztz"] Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.287070 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-catalog-content\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.287160 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dd849\" (UniqueName: \"kubernetes.io/projected/892ee660-1736-4c75-a99d-a016a15f0257-kube-api-access-dd849\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.287218 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-utilities\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.389156 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-catalog-content\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.389601 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dd849\" (UniqueName: \"kubernetes.io/projected/892ee660-1736-4c75-a99d-a016a15f0257-kube-api-access-dd849\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.389870 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-utilities\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.389878 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-catalog-content\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.390473 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-utilities\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " 
pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.410331 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dd849\" (UniqueName: \"kubernetes.io/projected/892ee660-1736-4c75-a99d-a016a15f0257-kube-api-access-dd849\") pod \"community-operators-htztz\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:13 crc kubenswrapper[5010]: I1126 16:07:13.565223 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:14 crc kubenswrapper[5010]: I1126 16:07:14.074058 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-htztz"] Nov 26 16:07:14 crc kubenswrapper[5010]: I1126 16:07:14.877724 5010 generic.go:334] "Generic (PLEG): container finished" podID="892ee660-1736-4c75-a99d-a016a15f0257" containerID="fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96" exitCode=0 Nov 26 16:07:14 crc kubenswrapper[5010]: I1126 16:07:14.877787 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htztz" event={"ID":"892ee660-1736-4c75-a99d-a016a15f0257","Type":"ContainerDied","Data":"fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96"} Nov 26 16:07:14 crc kubenswrapper[5010]: I1126 16:07:14.878042 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htztz" event={"ID":"892ee660-1736-4c75-a99d-a016a15f0257","Type":"ContainerStarted","Data":"cc9f15df9cd538c1656874c086e0ad0bd302f7795045fd5fe24711a1208f71f2"} Nov 26 16:07:14 crc kubenswrapper[5010]: I1126 16:07:14.892371 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:07:14 crc kubenswrapper[5010]: E1126 16:07:14.892655 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:07:17 crc kubenswrapper[5010]: I1126 16:07:17.909596 5010 generic.go:334] "Generic (PLEG): container finished" podID="892ee660-1736-4c75-a99d-a016a15f0257" containerID="f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30" exitCode=0 Nov 26 16:07:17 crc kubenswrapper[5010]: I1126 16:07:17.914841 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:07:17 crc kubenswrapper[5010]: I1126 16:07:17.915571 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htztz" event={"ID":"892ee660-1736-4c75-a99d-a016a15f0257","Type":"ContainerDied","Data":"f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30"} Nov 26 16:07:18 crc kubenswrapper[5010]: I1126 16:07:18.927753 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htztz" event={"ID":"892ee660-1736-4c75-a99d-a016a15f0257","Type":"ContainerStarted","Data":"30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9"} Nov 26 16:07:18 crc kubenswrapper[5010]: I1126 16:07:18.953930 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-htztz" podStartSLOduration=2.458008233 podStartE2EDuration="5.953895892s" podCreationTimestamp="2025-11-26 16:07:13 +0000 UTC" firstStartedPulling="2025-11-26 16:07:14.880483254 +0000 UTC m=+2455.671200402" lastFinishedPulling="2025-11-26 16:07:18.376370903 +0000 UTC m=+2459.167088061" observedRunningTime="2025-11-26 16:07:18.953242996 +0000 UTC m=+2459.743960164" watchObservedRunningTime="2025-11-26 16:07:18.953895892 +0000 UTC m=+2459.744613160" Nov 26 16:07:23 crc kubenswrapper[5010]: I1126 16:07:23.565426 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:23 crc kubenswrapper[5010]: I1126 16:07:23.566377 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:23 crc kubenswrapper[5010]: I1126 16:07:23.643916 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:24 crc kubenswrapper[5010]: I1126 16:07:24.027993 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:24 crc kubenswrapper[5010]: I1126 16:07:24.090843 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-htztz"] Nov 26 16:07:25 crc kubenswrapper[5010]: I1126 16:07:25.989999 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-htztz" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="registry-server" containerID="cri-o://30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9" gracePeriod=2 Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.359445 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.389920 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-utilities\") pod \"892ee660-1736-4c75-a99d-a016a15f0257\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.390041 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dd849\" (UniqueName: \"kubernetes.io/projected/892ee660-1736-4c75-a99d-a016a15f0257-kube-api-access-dd849\") pod \"892ee660-1736-4c75-a99d-a016a15f0257\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.390109 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-catalog-content\") pod \"892ee660-1736-4c75-a99d-a016a15f0257\" (UID: \"892ee660-1736-4c75-a99d-a016a15f0257\") " Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.390984 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-utilities" (OuterVolumeSpecName: "utilities") pod "892ee660-1736-4c75-a99d-a016a15f0257" (UID: "892ee660-1736-4c75-a99d-a016a15f0257"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.398182 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/892ee660-1736-4c75-a99d-a016a15f0257-kube-api-access-dd849" (OuterVolumeSpecName: "kube-api-access-dd849") pod "892ee660-1736-4c75-a99d-a016a15f0257" (UID: "892ee660-1736-4c75-a99d-a016a15f0257"). InnerVolumeSpecName "kube-api-access-dd849". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.473765 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "892ee660-1736-4c75-a99d-a016a15f0257" (UID: "892ee660-1736-4c75-a99d-a016a15f0257"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.491778 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.491832 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dd849\" (UniqueName: \"kubernetes.io/projected/892ee660-1736-4c75-a99d-a016a15f0257-kube-api-access-dd849\") on node \"crc\" DevicePath \"\"" Nov 26 16:07:26 crc kubenswrapper[5010]: I1126 16:07:26.491851 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/892ee660-1736-4c75-a99d-a016a15f0257-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.003070 5010 generic.go:334] "Generic (PLEG): container finished" podID="892ee660-1736-4c75-a99d-a016a15f0257" containerID="30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9" exitCode=0 Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.003146 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htztz" event={"ID":"892ee660-1736-4c75-a99d-a016a15f0257","Type":"ContainerDied","Data":"30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9"} Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.003156 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-htztz" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.003187 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-htztz" event={"ID":"892ee660-1736-4c75-a99d-a016a15f0257","Type":"ContainerDied","Data":"cc9f15df9cd538c1656874c086e0ad0bd302f7795045fd5fe24711a1208f71f2"} Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.003219 5010 scope.go:117] "RemoveContainer" containerID="30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.023149 5010 scope.go:117] "RemoveContainer" containerID="f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.053219 5010 scope.go:117] "RemoveContainer" containerID="fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.064307 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-htztz"] Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.077139 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-htztz"] Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.081540 5010 scope.go:117] "RemoveContainer" containerID="30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9" Nov 26 16:07:27 crc kubenswrapper[5010]: E1126 16:07:27.081954 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9\": container with ID starting with 30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9 not found: ID does not exist" containerID="30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.082004 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9"} err="failed to get container status \"30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9\": rpc error: code = NotFound desc = could not find container \"30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9\": container with ID starting with 30e4263ad7d3cb7c9a29a88711377069a0124918a44c7566278049c7194652f9 not found: ID does not exist" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.082035 5010 scope.go:117] "RemoveContainer" containerID="f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30" Nov 26 16:07:27 crc kubenswrapper[5010]: E1126 16:07:27.082302 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30\": container with ID starting with f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30 not found: ID does not exist" containerID="f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.082334 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30"} err="failed to get container status \"f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30\": rpc error: code = NotFound desc = could not find 
container \"f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30\": container with ID starting with f982c0f5cfd257c43fb177b4a65c0afc51feecc736bb35a63adfb5cb45974a30 not found: ID does not exist" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.082359 5010 scope.go:117] "RemoveContainer" containerID="fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96" Nov 26 16:07:27 crc kubenswrapper[5010]: E1126 16:07:27.082571 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96\": container with ID starting with fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96 not found: ID does not exist" containerID="fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.082600 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96"} err="failed to get container status \"fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96\": rpc error: code = NotFound desc = could not find container \"fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96\": container with ID starting with fce3badce5a55d52079ab872c056012fb6f9bd3b0b72a82288ad4c20729b9f96 not found: ID does not exist" Nov 26 16:07:27 crc kubenswrapper[5010]: I1126 16:07:27.903398 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="892ee660-1736-4c75-a99d-a016a15f0257" path="/var/lib/kubelet/pods/892ee660-1736-4c75-a99d-a016a15f0257/volumes" Nov 26 16:07:29 crc kubenswrapper[5010]: I1126 16:07:29.900993 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:07:29 crc kubenswrapper[5010]: E1126 16:07:29.901954 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:07:43 crc kubenswrapper[5010]: I1126 16:07:43.891651 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:07:43 crc kubenswrapper[5010]: E1126 16:07:43.892625 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:07:54 crc kubenswrapper[5010]: I1126 16:07:54.892420 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:07:54 crc kubenswrapper[5010]: E1126 16:07:54.893657 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:08:07 crc kubenswrapper[5010]: I1126 16:08:07.891441 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:08:07 crc kubenswrapper[5010]: E1126 16:08:07.892346 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:08:22 crc kubenswrapper[5010]: I1126 16:08:22.891953 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:08:23 crc kubenswrapper[5010]: I1126 16:08:23.548485 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"a4a5c63b6ed2cecf0e4ccfd02ca275e191e0443e6d00734a90f0a7fbd37328b0"} Nov 26 16:10:41 crc kubenswrapper[5010]: I1126 16:10:41.422376 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:10:41 crc kubenswrapper[5010]: I1126 16:10:41.423087 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:11:11 crc kubenswrapper[5010]: I1126 16:11:11.423155 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:11:11 crc kubenswrapper[5010]: I1126 16:11:11.423777 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:11:41 crc kubenswrapper[5010]: I1126 16:11:41.422871 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:11:41 crc kubenswrapper[5010]: I1126 16:11:41.424335 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" 
output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:11:41 crc kubenswrapper[5010]: I1126 16:11:41.424467 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:11:41 crc kubenswrapper[5010]: I1126 16:11:41.425119 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4a5c63b6ed2cecf0e4ccfd02ca275e191e0443e6d00734a90f0a7fbd37328b0"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:11:41 crc kubenswrapper[5010]: I1126 16:11:41.425282 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://a4a5c63b6ed2cecf0e4ccfd02ca275e191e0443e6d00734a90f0a7fbd37328b0" gracePeriod=600 Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.316855 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="a4a5c63b6ed2cecf0e4ccfd02ca275e191e0443e6d00734a90f0a7fbd37328b0" exitCode=0 Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.316959 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"a4a5c63b6ed2cecf0e4ccfd02ca275e191e0443e6d00734a90f0a7fbd37328b0"} Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.317495 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20"} Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.317527 5010 scope.go:117] "RemoveContainer" containerID="2662efc444b568c60e3219d1bd62cab5b9a18094840889cc744d45a92c03c87f" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.438646 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hn8v9"] Nov 26 16:11:42 crc kubenswrapper[5010]: E1126 16:11:42.438985 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="registry-server" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.439002 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="registry-server" Nov 26 16:11:42 crc kubenswrapper[5010]: E1126 16:11:42.439028 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="extract-content" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.439036 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="extract-content" Nov 26 16:11:42 crc kubenswrapper[5010]: E1126 16:11:42.439056 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="extract-utilities" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.439064 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="extract-utilities" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.439259 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="892ee660-1736-4c75-a99d-a016a15f0257" containerName="registry-server" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.440479 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.454736 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hn8v9"] Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.549258 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-catalog-content\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.549357 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pf4vw\" (UniqueName: \"kubernetes.io/projected/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-kube-api-access-pf4vw\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.549387 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-utilities\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.651092 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-catalog-content\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.651196 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pf4vw\" (UniqueName: \"kubernetes.io/projected/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-kube-api-access-pf4vw\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.651220 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-utilities\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.651650 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-catalog-content\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.651668 5010 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-utilities\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.674637 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pf4vw\" (UniqueName: \"kubernetes.io/projected/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-kube-api-access-pf4vw\") pod \"redhat-operators-hn8v9\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:42 crc kubenswrapper[5010]: I1126 16:11:42.769126 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:43 crc kubenswrapper[5010]: I1126 16:11:43.212409 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hn8v9"] Nov 26 16:11:43 crc kubenswrapper[5010]: I1126 16:11:43.330662 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerStarted","Data":"7070dd5d8c892f1f4370ca7a7100576bb28a67dc3dedebcfc5442176e51482f0"} Nov 26 16:11:44 crc kubenswrapper[5010]: I1126 16:11:44.338104 5010 generic.go:334] "Generic (PLEG): container finished" podID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerID="8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0" exitCode=0 Nov 26 16:11:44 crc kubenswrapper[5010]: I1126 16:11:44.338242 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerDied","Data":"8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0"} Nov 26 16:11:45 crc kubenswrapper[5010]: I1126 16:11:45.348045 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerStarted","Data":"57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d"} Nov 26 16:11:46 crc kubenswrapper[5010]: I1126 16:11:46.358168 5010 generic.go:334] "Generic (PLEG): container finished" podID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerID="57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d" exitCode=0 Nov 26 16:11:46 crc kubenswrapper[5010]: I1126 16:11:46.358229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerDied","Data":"57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d"} Nov 26 16:11:47 crc kubenswrapper[5010]: I1126 16:11:47.367467 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerStarted","Data":"700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9"} Nov 26 16:11:47 crc kubenswrapper[5010]: I1126 16:11:47.385683 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hn8v9" podStartSLOduration=2.726803074 podStartE2EDuration="5.385667039s" podCreationTimestamp="2025-11-26 16:11:42 +0000 UTC" firstStartedPulling="2025-11-26 16:11:44.340874155 +0000 UTC m=+2725.131591303" lastFinishedPulling="2025-11-26 16:11:46.99973809 +0000 
UTC m=+2727.790455268" observedRunningTime="2025-11-26 16:11:47.382761467 +0000 UTC m=+2728.173478635" watchObservedRunningTime="2025-11-26 16:11:47.385667039 +0000 UTC m=+2728.176384187" Nov 26 16:11:52 crc kubenswrapper[5010]: I1126 16:11:52.770006 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:52 crc kubenswrapper[5010]: I1126 16:11:52.771557 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:11:53 crc kubenswrapper[5010]: I1126 16:11:53.840976 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hn8v9" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="registry-server" probeResult="failure" output=< Nov 26 16:11:53 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 16:11:53 crc kubenswrapper[5010]: > Nov 26 16:12:02 crc kubenswrapper[5010]: I1126 16:12:02.824991 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:12:02 crc kubenswrapper[5010]: I1126 16:12:02.883460 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:12:03 crc kubenswrapper[5010]: I1126 16:12:03.080824 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hn8v9"] Nov 26 16:12:04 crc kubenswrapper[5010]: I1126 16:12:04.526533 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hn8v9" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="registry-server" containerID="cri-o://700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9" gracePeriod=2 Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.119445 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.232634 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-utilities\") pod \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.232679 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-catalog-content\") pod \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.232763 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pf4vw\" (UniqueName: \"kubernetes.io/projected/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-kube-api-access-pf4vw\") pod \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\" (UID: \"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af\") " Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.234387 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-utilities" (OuterVolumeSpecName: "utilities") pod "cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" (UID: "cdaa673e-6fd7-41a4-97e3-6ea4b86c30af"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.240679 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-kube-api-access-pf4vw" (OuterVolumeSpecName: "kube-api-access-pf4vw") pod "cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" (UID: "cdaa673e-6fd7-41a4-97e3-6ea4b86c30af"). InnerVolumeSpecName "kube-api-access-pf4vw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.334983 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pf4vw\" (UniqueName: \"kubernetes.io/projected/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-kube-api-access-pf4vw\") on node \"crc\" DevicePath \"\"" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.335038 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.351894 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" (UID: "cdaa673e-6fd7-41a4-97e3-6ea4b86c30af"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.437166 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.537178 5010 generic.go:334] "Generic (PLEG): container finished" podID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerID="700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9" exitCode=0 Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.537227 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerDied","Data":"700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9"} Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.537265 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hn8v9" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.537267 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hn8v9" event={"ID":"cdaa673e-6fd7-41a4-97e3-6ea4b86c30af","Type":"ContainerDied","Data":"7070dd5d8c892f1f4370ca7a7100576bb28a67dc3dedebcfc5442176e51482f0"} Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.537305 5010 scope.go:117] "RemoveContainer" containerID="700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.565157 5010 scope.go:117] "RemoveContainer" containerID="57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.585655 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hn8v9"] Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.590646 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hn8v9"] Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.609737 5010 scope.go:117] "RemoveContainer" containerID="8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.641168 5010 scope.go:117] "RemoveContainer" containerID="700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9" Nov 26 16:12:05 crc kubenswrapper[5010]: E1126 16:12:05.641751 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9\": container with ID starting with 700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9 not found: ID does not exist" containerID="700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.641825 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9"} err="failed to get container status \"700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9\": rpc error: code = NotFound desc = could not find container \"700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9\": container with ID starting with 700d48836136845f2d834f01df5a154e2cd77def7dc834fb9523f49c43f7a8a9 not found: ID does not exist" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.641871 5010 scope.go:117] "RemoveContainer" containerID="57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d" Nov 26 16:12:05 crc kubenswrapper[5010]: E1126 16:12:05.642426 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d\": container with ID starting with 57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d not found: ID does not exist" containerID="57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.642469 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d"} err="failed to get container status \"57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d\": rpc error: code = NotFound desc = could not find container 
\"57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d\": container with ID starting with 57fcfe105407e82fd7108d025543715d418e20852e14fa526df9da52b896603d not found: ID does not exist" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.642497 5010 scope.go:117] "RemoveContainer" containerID="8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0" Nov 26 16:12:05 crc kubenswrapper[5010]: E1126 16:12:05.642944 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0\": container with ID starting with 8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0 not found: ID does not exist" containerID="8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.642982 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0"} err="failed to get container status \"8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0\": rpc error: code = NotFound desc = could not find container \"8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0\": container with ID starting with 8deb57e1c031cbd66cec40e1963381741d6bd5854e47ce022c903f737b276db0 not found: ID does not exist" Nov 26 16:12:05 crc kubenswrapper[5010]: I1126 16:12:05.906344 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" path="/var/lib/kubelet/pods/cdaa673e-6fd7-41a4-97e3-6ea4b86c30af/volumes" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.975060 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:12:44 crc kubenswrapper[5010]: E1126 16:12:44.976341 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="extract-utilities" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.976368 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="extract-utilities" Nov 26 16:12:44 crc kubenswrapper[5010]: E1126 16:12:44.976410 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="extract-content" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.976428 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="extract-content" Nov 26 16:12:44 crc kubenswrapper[5010]: E1126 16:12:44.976463 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="registry-server" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.976479 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="registry-server" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.977029 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdaa673e-6fd7-41a4-97e3-6ea4b86c30af" containerName="registry-server" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.982315 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:44 crc kubenswrapper[5010]: I1126 16:12:44.993483 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.081323 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-utilities\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.081505 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-catalog-content\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.081548 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rfzqt\" (UniqueName: \"kubernetes.io/projected/88f9e25d-4585-47b4-b5f0-95a1e6866742-kube-api-access-rfzqt\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.182503 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-catalog-content\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.182563 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rfzqt\" (UniqueName: \"kubernetes.io/projected/88f9e25d-4585-47b4-b5f0-95a1e6866742-kube-api-access-rfzqt\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.182636 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-utilities\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.183118 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-catalog-content\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.183141 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-utilities\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.204958 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rfzqt\" (UniqueName: \"kubernetes.io/projected/88f9e25d-4585-47b4-b5f0-95a1e6866742-kube-api-access-rfzqt\") pod \"certified-operators-ds8nd\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.330771 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.803789 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:12:45 crc kubenswrapper[5010]: I1126 16:12:45.948285 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerStarted","Data":"1264fe8e8294963b77a83da4047c36fcee78d4b72719166979b91086b4093402"} Nov 26 16:12:46 crc kubenswrapper[5010]: I1126 16:12:46.960269 5010 generic.go:334] "Generic (PLEG): container finished" podID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerID="5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755" exitCode=0 Nov 26 16:12:46 crc kubenswrapper[5010]: I1126 16:12:46.960325 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerDied","Data":"5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755"} Nov 26 16:12:46 crc kubenswrapper[5010]: I1126 16:12:46.963315 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:12:51 crc kubenswrapper[5010]: I1126 16:12:51.003396 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerStarted","Data":"9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86"} Nov 26 16:12:52 crc kubenswrapper[5010]: I1126 16:12:52.014977 5010 generic.go:334] "Generic (PLEG): container finished" podID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerID="9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86" exitCode=0 Nov 26 16:12:52 crc kubenswrapper[5010]: I1126 16:12:52.015020 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerDied","Data":"9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86"} Nov 26 16:12:53 crc kubenswrapper[5010]: I1126 16:12:53.029000 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerStarted","Data":"3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0"} Nov 26 16:12:53 crc kubenswrapper[5010]: I1126 16:12:53.054272 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ds8nd" podStartSLOduration=3.582602926 podStartE2EDuration="9.054244332s" podCreationTimestamp="2025-11-26 16:12:44 +0000 UTC" firstStartedPulling="2025-11-26 16:12:46.963017554 +0000 UTC m=+2787.753734712" lastFinishedPulling="2025-11-26 16:12:52.43465894 +0000 UTC m=+2793.225376118" observedRunningTime="2025-11-26 16:12:53.048331496 +0000 UTC m=+2793.839048664" watchObservedRunningTime="2025-11-26 
16:12:53.054244332 +0000 UTC m=+2793.844961520" Nov 26 16:12:55 crc kubenswrapper[5010]: I1126 16:12:55.333396 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:55 crc kubenswrapper[5010]: I1126 16:12:55.333948 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:12:55 crc kubenswrapper[5010]: I1126 16:12:55.404485 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:13:05 crc kubenswrapper[5010]: I1126 16:13:05.411805 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:13:05 crc kubenswrapper[5010]: I1126 16:13:05.521617 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:13:05 crc kubenswrapper[5010]: I1126 16:13:05.589197 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 16:13:05 crc kubenswrapper[5010]: I1126 16:13:05.589492 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-csc9n" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="registry-server" containerID="cri-o://621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a" gracePeriod=2 Nov 26 16:13:05 crc kubenswrapper[5010]: E1126 16:13:05.744054 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod422e914d_f59a_4d7b_9614_77db0cf86ab6.slice/crio-conmon-621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.044594 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.124110 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cglhd\" (UniqueName: \"kubernetes.io/projected/422e914d-f59a-4d7b-9614-77db0cf86ab6-kube-api-access-cglhd\") pod \"422e914d-f59a-4d7b-9614-77db0cf86ab6\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.124178 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-utilities\") pod \"422e914d-f59a-4d7b-9614-77db0cf86ab6\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.124259 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-catalog-content\") pod \"422e914d-f59a-4d7b-9614-77db0cf86ab6\" (UID: \"422e914d-f59a-4d7b-9614-77db0cf86ab6\") " Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.127327 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-utilities" (OuterVolumeSpecName: "utilities") pod "422e914d-f59a-4d7b-9614-77db0cf86ab6" (UID: "422e914d-f59a-4d7b-9614-77db0cf86ab6"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.136849 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/422e914d-f59a-4d7b-9614-77db0cf86ab6-kube-api-access-cglhd" (OuterVolumeSpecName: "kube-api-access-cglhd") pod "422e914d-f59a-4d7b-9614-77db0cf86ab6" (UID: "422e914d-f59a-4d7b-9614-77db0cf86ab6"). InnerVolumeSpecName "kube-api-access-cglhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.148782 5010 generic.go:334] "Generic (PLEG): container finished" podID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerID="621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a" exitCode=0 Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.148865 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-csc9n" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.148864 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerDied","Data":"621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a"} Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.149001 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-csc9n" event={"ID":"422e914d-f59a-4d7b-9614-77db0cf86ab6","Type":"ContainerDied","Data":"69ddf6e7f5ff6211c6d55bd29417cc1f4a46f11c205f937e800c98b4375769c1"} Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.149043 5010 scope.go:117] "RemoveContainer" containerID="621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.167848 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "422e914d-f59a-4d7b-9614-77db0cf86ab6" (UID: "422e914d-f59a-4d7b-9614-77db0cf86ab6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.182482 5010 scope.go:117] "RemoveContainer" containerID="63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.209792 5010 scope.go:117] "RemoveContainer" containerID="d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.225886 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.225919 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cglhd\" (UniqueName: \"kubernetes.io/projected/422e914d-f59a-4d7b-9614-77db0cf86ab6-kube-api-access-cglhd\") on node \"crc\" DevicePath \"\"" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.225930 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/422e914d-f59a-4d7b-9614-77db0cf86ab6-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.244433 5010 scope.go:117] "RemoveContainer" containerID="621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a" Nov 26 16:13:06 crc kubenswrapper[5010]: E1126 16:13:06.245004 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a\": container with ID starting with 621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a not found: ID does not exist" containerID="621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.245052 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a"} err="failed to get container status \"621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a\": rpc error: code = NotFound desc = could not find container \"621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a\": container with ID starting with 621c6d4c921308897982e73176fd1173f465ac29068e7345decdf6753f85710a not found: ID does not exist" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.245082 5010 scope.go:117] "RemoveContainer" containerID="63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32" Nov 26 16:13:06 crc kubenswrapper[5010]: E1126 16:13:06.245490 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32\": container with ID starting with 63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32 not found: ID does not exist" containerID="63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.245586 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32"} err="failed to get container status \"63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32\": rpc error: code = NotFound desc = could not find container 
\"63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32\": container with ID starting with 63f1add6c9fe7e73502deb9dae2aa6cfad7a7a800af2691c0ebf8d614f741d32 not found: ID does not exist" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.245672 5010 scope.go:117] "RemoveContainer" containerID="d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51" Nov 26 16:13:06 crc kubenswrapper[5010]: E1126 16:13:06.246071 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51\": container with ID starting with d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51 not found: ID does not exist" containerID="d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.246150 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51"} err="failed to get container status \"d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51\": rpc error: code = NotFound desc = could not find container \"d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51\": container with ID starting with d411b8f21b5dc177b4bea492023c4ce0ea99980f6a164d0ca3d7999ca27c1f51 not found: ID does not exist" Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.477993 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 16:13:06 crc kubenswrapper[5010]: I1126 16:13:06.502113 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-csc9n"] Nov 26 16:13:07 crc kubenswrapper[5010]: I1126 16:13:07.901513 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" path="/var/lib/kubelet/pods/422e914d-f59a-4d7b-9614-77db0cf86ab6/volumes" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.789142 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vvph8"] Nov 26 16:13:27 crc kubenswrapper[5010]: E1126 16:13:27.790594 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="registry-server" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.790620 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="registry-server" Nov 26 16:13:27 crc kubenswrapper[5010]: E1126 16:13:27.790676 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="extract-utilities" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.790691 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="extract-utilities" Nov 26 16:13:27 crc kubenswrapper[5010]: E1126 16:13:27.790761 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="extract-content" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.790775 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="extract-content" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.791032 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="422e914d-f59a-4d7b-9614-77db0cf86ab6" containerName="registry-server" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.793343 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.812837 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vvph8"] Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.943841 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-utilities\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.943890 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6brt\" (UniqueName: \"kubernetes.io/projected/1209258b-d550-4063-9fe3-09eb0044bfff-kube-api-access-f6brt\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:27 crc kubenswrapper[5010]: I1126 16:13:27.944003 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-catalog-content\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.045476 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-utilities\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.045565 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6brt\" (UniqueName: \"kubernetes.io/projected/1209258b-d550-4063-9fe3-09eb0044bfff-kube-api-access-f6brt\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.045599 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-catalog-content\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.046096 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-utilities\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.046117 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-catalog-content\") pod \"redhat-marketplace-vvph8\" (UID: 
\"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.078808 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6brt\" (UniqueName: \"kubernetes.io/projected/1209258b-d550-4063-9fe3-09eb0044bfff-kube-api-access-f6brt\") pod \"redhat-marketplace-vvph8\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.170208 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.434346 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vvph8"] Nov 26 16:13:28 crc kubenswrapper[5010]: I1126 16:13:28.532761 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vvph8" event={"ID":"1209258b-d550-4063-9fe3-09eb0044bfff","Type":"ContainerStarted","Data":"1f5c9f3e354efd9070a64b9e2a99e97f5a8f73af0cc63f9578b8f46a2dad2c8a"} Nov 26 16:13:29 crc kubenswrapper[5010]: I1126 16:13:29.545811 5010 generic.go:334] "Generic (PLEG): container finished" podID="1209258b-d550-4063-9fe3-09eb0044bfff" containerID="ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58" exitCode=0 Nov 26 16:13:29 crc kubenswrapper[5010]: I1126 16:13:29.545914 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vvph8" event={"ID":"1209258b-d550-4063-9fe3-09eb0044bfff","Type":"ContainerDied","Data":"ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58"} Nov 26 16:13:31 crc kubenswrapper[5010]: I1126 16:13:31.567752 5010 generic.go:334] "Generic (PLEG): container finished" podID="1209258b-d550-4063-9fe3-09eb0044bfff" containerID="21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931" exitCode=0 Nov 26 16:13:31 crc kubenswrapper[5010]: I1126 16:13:31.567942 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vvph8" event={"ID":"1209258b-d550-4063-9fe3-09eb0044bfff","Type":"ContainerDied","Data":"21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931"} Nov 26 16:13:32 crc kubenswrapper[5010]: I1126 16:13:32.582623 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vvph8" event={"ID":"1209258b-d550-4063-9fe3-09eb0044bfff","Type":"ContainerStarted","Data":"455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306"} Nov 26 16:13:32 crc kubenswrapper[5010]: I1126 16:13:32.616436 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vvph8" podStartSLOduration=3.077830951 podStartE2EDuration="5.616405103s" podCreationTimestamp="2025-11-26 16:13:27 +0000 UTC" firstStartedPulling="2025-11-26 16:13:29.548439035 +0000 UTC m=+2830.339156223" lastFinishedPulling="2025-11-26 16:13:32.087013187 +0000 UTC m=+2832.877730375" observedRunningTime="2025-11-26 16:13:32.608563528 +0000 UTC m=+2833.399280706" watchObservedRunningTime="2025-11-26 16:13:32.616405103 +0000 UTC m=+2833.407122301" Nov 26 16:13:38 crc kubenswrapper[5010]: I1126 16:13:38.171016 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:38 crc kubenswrapper[5010]: I1126 16:13:38.172518 5010 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:38 crc kubenswrapper[5010]: I1126 16:13:38.223573 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:38 crc kubenswrapper[5010]: I1126 16:13:38.700780 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:38 crc kubenswrapper[5010]: I1126 16:13:38.761192 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vvph8"] Nov 26 16:13:40 crc kubenswrapper[5010]: I1126 16:13:40.655368 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vvph8" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="registry-server" containerID="cri-o://455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306" gracePeriod=2 Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.424072 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.424852 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.631740 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.667529 5010 generic.go:334] "Generic (PLEG): container finished" podID="1209258b-d550-4063-9fe3-09eb0044bfff" containerID="455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306" exitCode=0 Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.667593 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vvph8" event={"ID":"1209258b-d550-4063-9fe3-09eb0044bfff","Type":"ContainerDied","Data":"455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306"} Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.667746 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vvph8" event={"ID":"1209258b-d550-4063-9fe3-09eb0044bfff","Type":"ContainerDied","Data":"1f5c9f3e354efd9070a64b9e2a99e97f5a8f73af0cc63f9578b8f46a2dad2c8a"} Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.667772 5010 scope.go:117] "RemoveContainer" containerID="455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.667826 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vvph8" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.697174 5010 scope.go:117] "RemoveContainer" containerID="21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.719506 5010 scope.go:117] "RemoveContainer" containerID="ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.740072 5010 scope.go:117] "RemoveContainer" containerID="455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306" Nov 26 16:13:41 crc kubenswrapper[5010]: E1126 16:13:41.740498 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306\": container with ID starting with 455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306 not found: ID does not exist" containerID="455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.740528 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306"} err="failed to get container status \"455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306\": rpc error: code = NotFound desc = could not find container \"455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306\": container with ID starting with 455bf69c17ec70abf3ff6c5bcf001333b29cf90db85e606727034cd85c09b306 not found: ID does not exist" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.740550 5010 scope.go:117] "RemoveContainer" containerID="21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931" Nov 26 16:13:41 crc kubenswrapper[5010]: E1126 16:13:41.740787 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931\": container with ID starting with 21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931 not found: ID does not exist" containerID="21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.740810 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931"} err="failed to get container status \"21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931\": rpc error: code = NotFound desc = could not find container \"21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931\": container with ID starting with 21082d87a4060d5e5f1b2329d0cc4c80aa3ad16c20c8f2d52a496ede32a77931 not found: ID does not exist" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.740823 5010 scope.go:117] "RemoveContainer" containerID="ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58" Nov 26 16:13:41 crc kubenswrapper[5010]: E1126 16:13:41.741004 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58\": container with ID starting with ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58 not found: ID does not exist" containerID="ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58" 
Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.741029 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58"} err="failed to get container status \"ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58\": rpc error: code = NotFound desc = could not find container \"ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58\": container with ID starting with ca57ec2a7845b670c338b4b473ff809e30d62308e9c2524046df11b3e6e23c58 not found: ID does not exist" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.749687 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6brt\" (UniqueName: \"kubernetes.io/projected/1209258b-d550-4063-9fe3-09eb0044bfff-kube-api-access-f6brt\") pod \"1209258b-d550-4063-9fe3-09eb0044bfff\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.750655 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-utilities\") pod \"1209258b-d550-4063-9fe3-09eb0044bfff\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.751524 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-catalog-content\") pod \"1209258b-d550-4063-9fe3-09eb0044bfff\" (UID: \"1209258b-d550-4063-9fe3-09eb0044bfff\") " Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.752870 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-utilities" (OuterVolumeSpecName: "utilities") pod "1209258b-d550-4063-9fe3-09eb0044bfff" (UID: "1209258b-d550-4063-9fe3-09eb0044bfff"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.755633 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1209258b-d550-4063-9fe3-09eb0044bfff-kube-api-access-f6brt" (OuterVolumeSpecName: "kube-api-access-f6brt") pod "1209258b-d550-4063-9fe3-09eb0044bfff" (UID: "1209258b-d550-4063-9fe3-09eb0044bfff"). InnerVolumeSpecName "kube-api-access-f6brt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.769503 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1209258b-d550-4063-9fe3-09eb0044bfff" (UID: "1209258b-d550-4063-9fe3-09eb0044bfff"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.853336 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6brt\" (UniqueName: \"kubernetes.io/projected/1209258b-d550-4063-9fe3-09eb0044bfff-kube-api-access-f6brt\") on node \"crc\" DevicePath \"\"" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.853389 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.853408 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1209258b-d550-4063-9fe3-09eb0044bfff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.993436 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vvph8"] Nov 26 16:13:41 crc kubenswrapper[5010]: I1126 16:13:41.998726 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vvph8"] Nov 26 16:13:43 crc kubenswrapper[5010]: I1126 16:13:43.923946 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" path="/var/lib/kubelet/pods/1209258b-d550-4063-9fe3-09eb0044bfff/volumes" Nov 26 16:14:11 crc kubenswrapper[5010]: I1126 16:14:11.422788 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:14:11 crc kubenswrapper[5010]: I1126 16:14:11.423740 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:14:41 crc kubenswrapper[5010]: I1126 16:14:41.422485 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:14:41 crc kubenswrapper[5010]: I1126 16:14:41.423094 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:14:41 crc kubenswrapper[5010]: I1126 16:14:41.423145 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:14:41 crc kubenswrapper[5010]: I1126 16:14:41.423838 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon 
failed liveness probe, will be restarted" Nov 26 16:14:41 crc kubenswrapper[5010]: I1126 16:14:41.423895 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" gracePeriod=600 Nov 26 16:14:41 crc kubenswrapper[5010]: E1126 16:14:41.558220 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:14:42 crc kubenswrapper[5010]: I1126 16:14:42.198204 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" exitCode=0 Nov 26 16:14:42 crc kubenswrapper[5010]: I1126 16:14:42.198255 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20"} Nov 26 16:14:42 crc kubenswrapper[5010]: I1126 16:14:42.198301 5010 scope.go:117] "RemoveContainer" containerID="a4a5c63b6ed2cecf0e4ccfd02ca275e191e0443e6d00734a90f0a7fbd37328b0" Nov 26 16:14:42 crc kubenswrapper[5010]: I1126 16:14:42.198974 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:14:42 crc kubenswrapper[5010]: E1126 16:14:42.199365 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:14:55 crc kubenswrapper[5010]: I1126 16:14:55.892335 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:14:55 crc kubenswrapper[5010]: E1126 16:14:55.893150 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.211573 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts"] Nov 26 16:15:00 crc kubenswrapper[5010]: E1126 16:15:00.213560 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="extract-content" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.213675 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="extract-content" Nov 26 16:15:00 crc kubenswrapper[5010]: E1126 16:15:00.213803 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="registry-server" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.213892 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="registry-server" Nov 26 16:15:00 crc kubenswrapper[5010]: E1126 16:15:00.213964 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="extract-utilities" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.214026 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="extract-utilities" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.214232 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1209258b-d550-4063-9fe3-09eb0044bfff" containerName="registry-server" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.214818 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.216975 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.219671 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.240886 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts"] Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.332535 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2ea81325-8811-4b0c-90c5-500aaaaada9f-secret-volume\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.332602 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bml9z\" (UniqueName: \"kubernetes.io/projected/2ea81325-8811-4b0c-90c5-500aaaaada9f-kube-api-access-bml9z\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.332701 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2ea81325-8811-4b0c-90c5-500aaaaada9f-config-volume\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.435056 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2ea81325-8811-4b0c-90c5-500aaaaada9f-secret-volume\") pod \"collect-profiles-29402895-h4mts\" (UID: 
\"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.435154 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bml9z\" (UniqueName: \"kubernetes.io/projected/2ea81325-8811-4b0c-90c5-500aaaaada9f-kube-api-access-bml9z\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.435217 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2ea81325-8811-4b0c-90c5-500aaaaada9f-config-volume\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.436555 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2ea81325-8811-4b0c-90c5-500aaaaada9f-config-volume\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.441642 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2ea81325-8811-4b0c-90c5-500aaaaada9f-secret-volume\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.450798 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bml9z\" (UniqueName: \"kubernetes.io/projected/2ea81325-8811-4b0c-90c5-500aaaaada9f-kube-api-access-bml9z\") pod \"collect-profiles-29402895-h4mts\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.539339 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:00 crc kubenswrapper[5010]: I1126 16:15:00.980034 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts"] Nov 26 16:15:01 crc kubenswrapper[5010]: I1126 16:15:01.374663 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" event={"ID":"2ea81325-8811-4b0c-90c5-500aaaaada9f","Type":"ContainerStarted","Data":"ac806371774bbf315bd728cdf025c34558385d43cdb5a05b329c8f70458f0fc9"} Nov 26 16:15:01 crc kubenswrapper[5010]: I1126 16:15:01.375007 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" event={"ID":"2ea81325-8811-4b0c-90c5-500aaaaada9f","Type":"ContainerStarted","Data":"7415d45404dc4e091cd3a106e17e0303dcec9b58e2717885bec259d186002f24"} Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.387428 5010 generic.go:334] "Generic (PLEG): container finished" podID="2ea81325-8811-4b0c-90c5-500aaaaada9f" containerID="ac806371774bbf315bd728cdf025c34558385d43cdb5a05b329c8f70458f0fc9" exitCode=0 Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.387501 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" event={"ID":"2ea81325-8811-4b0c-90c5-500aaaaada9f","Type":"ContainerDied","Data":"ac806371774bbf315bd728cdf025c34558385d43cdb5a05b329c8f70458f0fc9"} Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.762805 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.875725 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2ea81325-8811-4b0c-90c5-500aaaaada9f-config-volume\") pod \"2ea81325-8811-4b0c-90c5-500aaaaada9f\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.875793 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2ea81325-8811-4b0c-90c5-500aaaaada9f-secret-volume\") pod \"2ea81325-8811-4b0c-90c5-500aaaaada9f\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.875823 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bml9z\" (UniqueName: \"kubernetes.io/projected/2ea81325-8811-4b0c-90c5-500aaaaada9f-kube-api-access-bml9z\") pod \"2ea81325-8811-4b0c-90c5-500aaaaada9f\" (UID: \"2ea81325-8811-4b0c-90c5-500aaaaada9f\") " Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.877207 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ea81325-8811-4b0c-90c5-500aaaaada9f-config-volume" (OuterVolumeSpecName: "config-volume") pod "2ea81325-8811-4b0c-90c5-500aaaaada9f" (UID: "2ea81325-8811-4b0c-90c5-500aaaaada9f"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.883295 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2ea81325-8811-4b0c-90c5-500aaaaada9f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2ea81325-8811-4b0c-90c5-500aaaaada9f" (UID: "2ea81325-8811-4b0c-90c5-500aaaaada9f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.889003 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ea81325-8811-4b0c-90c5-500aaaaada9f-kube-api-access-bml9z" (OuterVolumeSpecName: "kube-api-access-bml9z") pod "2ea81325-8811-4b0c-90c5-500aaaaada9f" (UID: "2ea81325-8811-4b0c-90c5-500aaaaada9f"). InnerVolumeSpecName "kube-api-access-bml9z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.977609 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2ea81325-8811-4b0c-90c5-500aaaaada9f-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.977655 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bml9z\" (UniqueName: \"kubernetes.io/projected/2ea81325-8811-4b0c-90c5-500aaaaada9f-kube-api-access-bml9z\") on node \"crc\" DevicePath \"\"" Nov 26 16:15:02 crc kubenswrapper[5010]: I1126 16:15:02.977670 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2ea81325-8811-4b0c-90c5-500aaaaada9f-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:15:03 crc kubenswrapper[5010]: I1126 16:15:03.398611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" event={"ID":"2ea81325-8811-4b0c-90c5-500aaaaada9f","Type":"ContainerDied","Data":"7415d45404dc4e091cd3a106e17e0303dcec9b58e2717885bec259d186002f24"} Nov 26 16:15:03 crc kubenswrapper[5010]: I1126 16:15:03.398671 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts" Nov 26 16:15:03 crc kubenswrapper[5010]: I1126 16:15:03.398684 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7415d45404dc4e091cd3a106e17e0303dcec9b58e2717885bec259d186002f24" Nov 26 16:15:03 crc kubenswrapper[5010]: I1126 16:15:03.842838 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t"] Nov 26 16:15:03 crc kubenswrapper[5010]: I1126 16:15:03.850426 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402850-qqc6t"] Nov 26 16:15:03 crc kubenswrapper[5010]: I1126 16:15:03.900656 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9803034-f657-474c-aad0-4d2cfc54ed20" path="/var/lib/kubelet/pods/f9803034-f657-474c-aad0-4d2cfc54ed20/volumes" Nov 26 16:15:07 crc kubenswrapper[5010]: I1126 16:15:07.891845 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:15:07 crc kubenswrapper[5010]: E1126 16:15:07.892662 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:15:19 crc kubenswrapper[5010]: I1126 16:15:19.895241 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:15:19 crc kubenswrapper[5010]: E1126 16:15:19.895978 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:15:30 crc kubenswrapper[5010]: I1126 16:15:30.894081 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:15:30 crc kubenswrapper[5010]: E1126 16:15:30.895392 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:15:45 crc kubenswrapper[5010]: I1126 16:15:45.891519 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:15:45 crc kubenswrapper[5010]: E1126 16:15:45.893890 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:15:56 crc kubenswrapper[5010]: I1126 16:15:56.891163 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:15:56 crc kubenswrapper[5010]: E1126 16:15:56.891822 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:16:03 crc kubenswrapper[5010]: I1126 16:16:03.771927 5010 scope.go:117] "RemoveContainer" containerID="cdafb804e1ec93e2e611d9ba99b33effcf9a33d8328ef2a09032dfaedb11c394" Nov 26 16:16:11 crc kubenswrapper[5010]: I1126 16:16:11.892131 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:16:11 crc kubenswrapper[5010]: E1126 16:16:11.893182 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:16:25 crc kubenswrapper[5010]: I1126 16:16:25.892594 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:16:25 crc kubenswrapper[5010]: E1126 16:16:25.893800 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:16:38 crc kubenswrapper[5010]: I1126 16:16:38.891638 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:16:38 crc kubenswrapper[5010]: E1126 16:16:38.892502 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:16:53 crc kubenswrapper[5010]: I1126 16:16:53.891735 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:16:53 crc kubenswrapper[5010]: E1126 16:16:53.892691 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:17:06 crc kubenswrapper[5010]: I1126 16:17:06.891268 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:17:06 crc kubenswrapper[5010]: E1126 16:17:06.893559 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:17:21 crc kubenswrapper[5010]: I1126 16:17:21.892196 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:17:21 crc kubenswrapper[5010]: E1126 16:17:21.892926 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.775833 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s6pxw"] Nov 26 16:17:28 crc kubenswrapper[5010]: E1126 16:17:28.776778 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ea81325-8811-4b0c-90c5-500aaaaada9f" containerName="collect-profiles" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.776802 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ea81325-8811-4b0c-90c5-500aaaaada9f" containerName="collect-profiles" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.777050 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ea81325-8811-4b0c-90c5-500aaaaada9f" containerName="collect-profiles" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.779131 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.787316 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s6pxw"] Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.880321 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-catalog-content\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.880501 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-utilities\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.880616 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtwx7\" (UniqueName: \"kubernetes.io/projected/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-kube-api-access-mtwx7\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.981556 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-utilities\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.981624 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtwx7\" (UniqueName: \"kubernetes.io/projected/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-kube-api-access-mtwx7\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.981697 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-catalog-content\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.982193 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-utilities\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:28 crc kubenswrapper[5010]: I1126 16:17:28.982203 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-catalog-content\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:29 crc kubenswrapper[5010]: I1126 16:17:29.007146 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mtwx7\" (UniqueName: \"kubernetes.io/projected/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-kube-api-access-mtwx7\") pod \"community-operators-s6pxw\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:29 crc kubenswrapper[5010]: I1126 16:17:29.120112 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:29 crc kubenswrapper[5010]: I1126 16:17:29.348396 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s6pxw"] Nov 26 16:17:29 crc kubenswrapper[5010]: I1126 16:17:29.725249 5010 generic.go:334] "Generic (PLEG): container finished" podID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerID="803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e" exitCode=0 Nov 26 16:17:29 crc kubenswrapper[5010]: I1126 16:17:29.725289 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6pxw" event={"ID":"b8caf56c-cd29-4a03-bf74-6bb33ed00f07","Type":"ContainerDied","Data":"803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e"} Nov 26 16:17:29 crc kubenswrapper[5010]: I1126 16:17:29.725312 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6pxw" event={"ID":"b8caf56c-cd29-4a03-bf74-6bb33ed00f07","Type":"ContainerStarted","Data":"baf95dfba17f0b67ff7813a0200d09436a63fb7036f0961f228c4091ab36fff9"} Nov 26 16:17:31 crc kubenswrapper[5010]: E1126 16:17:31.339152 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8caf56c_cd29_4a03_bf74_6bb33ed00f07.slice/crio-conmon-f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8caf56c_cd29_4a03_bf74_6bb33ed00f07.slice/crio-f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:17:31 crc kubenswrapper[5010]: I1126 16:17:31.760153 5010 generic.go:334] "Generic (PLEG): container finished" podID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerID="f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e" exitCode=0 Nov 26 16:17:31 crc kubenswrapper[5010]: I1126 16:17:31.760266 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6pxw" event={"ID":"b8caf56c-cd29-4a03-bf74-6bb33ed00f07","Type":"ContainerDied","Data":"f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e"} Nov 26 16:17:32 crc kubenswrapper[5010]: I1126 16:17:32.772502 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6pxw" event={"ID":"b8caf56c-cd29-4a03-bf74-6bb33ed00f07","Type":"ContainerStarted","Data":"33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3"} Nov 26 16:17:32 crc kubenswrapper[5010]: I1126 16:17:32.798741 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s6pxw" podStartSLOduration=1.9606445099999998 podStartE2EDuration="4.798649735s" podCreationTimestamp="2025-11-26 16:17:28 +0000 UTC" firstStartedPulling="2025-11-26 16:17:29.726497715 +0000 UTC m=+3070.517214863" 
lastFinishedPulling="2025-11-26 16:17:32.56450295 +0000 UTC m=+3073.355220088" observedRunningTime="2025-11-26 16:17:32.794198414 +0000 UTC m=+3073.584915552" watchObservedRunningTime="2025-11-26 16:17:32.798649735 +0000 UTC m=+3073.589366923" Nov 26 16:17:32 crc kubenswrapper[5010]: I1126 16:17:32.891821 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:17:32 crc kubenswrapper[5010]: E1126 16:17:32.892575 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:17:39 crc kubenswrapper[5010]: I1126 16:17:39.120740 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:39 crc kubenswrapper[5010]: I1126 16:17:39.121124 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:39 crc kubenswrapper[5010]: I1126 16:17:39.181133 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:39 crc kubenswrapper[5010]: I1126 16:17:39.907016 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:39 crc kubenswrapper[5010]: I1126 16:17:39.973993 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s6pxw"] Nov 26 16:17:41 crc kubenswrapper[5010]: I1126 16:17:41.865635 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s6pxw" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="registry-server" containerID="cri-o://33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3" gracePeriod=2 Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.753765 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.876909 5010 generic.go:334] "Generic (PLEG): container finished" podID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerID="33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3" exitCode=0 Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.876951 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6pxw" event={"ID":"b8caf56c-cd29-4a03-bf74-6bb33ed00f07","Type":"ContainerDied","Data":"33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3"} Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.877016 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s6pxw" event={"ID":"b8caf56c-cd29-4a03-bf74-6bb33ed00f07","Type":"ContainerDied","Data":"baf95dfba17f0b67ff7813a0200d09436a63fb7036f0961f228c4091ab36fff9"} Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.877038 5010 scope.go:117] "RemoveContainer" containerID="33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.877353 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s6pxw" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.897365 5010 scope.go:117] "RemoveContainer" containerID="f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.910274 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-utilities\") pod \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.910380 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtwx7\" (UniqueName: \"kubernetes.io/projected/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-kube-api-access-mtwx7\") pod \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.910471 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-catalog-content\") pod \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\" (UID: \"b8caf56c-cd29-4a03-bf74-6bb33ed00f07\") " Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.911542 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-utilities" (OuterVolumeSpecName: "utilities") pod "b8caf56c-cd29-4a03-bf74-6bb33ed00f07" (UID: "b8caf56c-cd29-4a03-bf74-6bb33ed00f07"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.919952 5010 scope.go:117] "RemoveContainer" containerID="803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.919978 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-kube-api-access-mtwx7" (OuterVolumeSpecName: "kube-api-access-mtwx7") pod "b8caf56c-cd29-4a03-bf74-6bb33ed00f07" (UID: "b8caf56c-cd29-4a03-bf74-6bb33ed00f07"). InnerVolumeSpecName "kube-api-access-mtwx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.963942 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b8caf56c-cd29-4a03-bf74-6bb33ed00f07" (UID: "b8caf56c-cd29-4a03-bf74-6bb33ed00f07"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.964918 5010 scope.go:117] "RemoveContainer" containerID="33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3" Nov 26 16:17:42 crc kubenswrapper[5010]: E1126 16:17:42.965462 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3\": container with ID starting with 33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3 not found: ID does not exist" containerID="33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.965529 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3"} err="failed to get container status \"33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3\": rpc error: code = NotFound desc = could not find container \"33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3\": container with ID starting with 33d03c3b1987a7712be1aedc8744b32215be2549170814af886149c907f397d3 not found: ID does not exist" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.965565 5010 scope.go:117] "RemoveContainer" containerID="f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e" Nov 26 16:17:42 crc kubenswrapper[5010]: E1126 16:17:42.966028 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e\": container with ID starting with f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e not found: ID does not exist" containerID="f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.966056 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e"} err="failed to get container status \"f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e\": rpc error: code = NotFound desc = could not find container \"f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e\": container with ID starting with 
f62461c1887b11911db164a1d4d7bccbc7dede0f3307acbf552f233bab3a645e not found: ID does not exist" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.966070 5010 scope.go:117] "RemoveContainer" containerID="803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e" Nov 26 16:17:42 crc kubenswrapper[5010]: E1126 16:17:42.966313 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e\": container with ID starting with 803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e not found: ID does not exist" containerID="803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e" Nov 26 16:17:42 crc kubenswrapper[5010]: I1126 16:17:42.966346 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e"} err="failed to get container status \"803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e\": rpc error: code = NotFound desc = could not find container \"803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e\": container with ID starting with 803bae58a29af2a45493fb8ffff836548e6f2d2f424ebfa8f3642481aa3a697e not found: ID does not exist" Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.012348 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.012371 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.012380 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtwx7\" (UniqueName: \"kubernetes.io/projected/b8caf56c-cd29-4a03-bf74-6bb33ed00f07-kube-api-access-mtwx7\") on node \"crc\" DevicePath \"\"" Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.228497 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s6pxw"] Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.239947 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s6pxw"] Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.892380 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:17:43 crc kubenswrapper[5010]: E1126 16:17:43.892650 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:17:43 crc kubenswrapper[5010]: I1126 16:17:43.908437 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" path="/var/lib/kubelet/pods/b8caf56c-cd29-4a03-bf74-6bb33ed00f07/volumes" Nov 26 16:17:57 crc kubenswrapper[5010]: I1126 16:17:57.892092 5010 scope.go:117] "RemoveContainer" 
containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:17:57 crc kubenswrapper[5010]: E1126 16:17:57.893234 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:18:12 crc kubenswrapper[5010]: I1126 16:18:12.891433 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:18:12 crc kubenswrapper[5010]: E1126 16:18:12.892498 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:18:27 crc kubenswrapper[5010]: I1126 16:18:27.892333 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:18:27 crc kubenswrapper[5010]: E1126 16:18:27.893218 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:18:38 crc kubenswrapper[5010]: I1126 16:18:38.891761 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:18:38 crc kubenswrapper[5010]: E1126 16:18:38.892467 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:18:53 crc kubenswrapper[5010]: I1126 16:18:53.891910 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:18:53 crc kubenswrapper[5010]: E1126 16:18:53.892892 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:19:05 crc kubenswrapper[5010]: I1126 16:19:05.892305 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:19:05 crc kubenswrapper[5010]: E1126 16:19:05.894137 5010 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:19:17 crc kubenswrapper[5010]: I1126 16:19:17.891506 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:19:17 crc kubenswrapper[5010]: E1126 16:19:17.892283 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:19:29 crc kubenswrapper[5010]: I1126 16:19:29.898020 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:19:29 crc kubenswrapper[5010]: E1126 16:19:29.898808 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:19:43 crc kubenswrapper[5010]: I1126 16:19:43.891901 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:19:44 crc kubenswrapper[5010]: I1126 16:19:44.949260 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"19d41a574349f70822f552c86d7cd5e4d8943054274493eef9c7c965cd37ca2a"} Nov 26 16:22:11 crc kubenswrapper[5010]: I1126 16:22:11.422995 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:22:11 crc kubenswrapper[5010]: I1126 16:22:11.423588 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.797549 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8x7gh"] Nov 26 16:22:14 crc kubenswrapper[5010]: E1126 16:22:14.798329 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="extract-content" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.798345 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" 
containerName="extract-content" Nov 26 16:22:14 crc kubenswrapper[5010]: E1126 16:22:14.798371 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="extract-utilities" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.798381 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="extract-utilities" Nov 26 16:22:14 crc kubenswrapper[5010]: E1126 16:22:14.798417 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="registry-server" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.798426 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="registry-server" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.798595 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8caf56c-cd29-4a03-bf74-6bb33ed00f07" containerName="registry-server" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.800506 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.810573 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8x7gh"] Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.911957 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6lxh\" (UniqueName: \"kubernetes.io/projected/6ec784f4-1063-47ad-93fe-d69df711a6ad-kube-api-access-p6lxh\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.912389 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-catalog-content\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:14 crc kubenswrapper[5010]: I1126 16:22:14.912607 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-utilities\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.014159 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-catalog-content\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.014275 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-utilities\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.014396 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-p6lxh\" (UniqueName: \"kubernetes.io/projected/6ec784f4-1063-47ad-93fe-d69df711a6ad-kube-api-access-p6lxh\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.014812 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-catalog-content\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.015004 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-utilities\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.040477 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6lxh\" (UniqueName: \"kubernetes.io/projected/6ec784f4-1063-47ad-93fe-d69df711a6ad-kube-api-access-p6lxh\") pod \"redhat-operators-8x7gh\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.125775 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:15 crc kubenswrapper[5010]: I1126 16:22:15.612514 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8x7gh"] Nov 26 16:22:16 crc kubenswrapper[5010]: I1126 16:22:16.295984 5010 generic.go:334] "Generic (PLEG): container finished" podID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerID="da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454" exitCode=0 Nov 26 16:22:16 crc kubenswrapper[5010]: I1126 16:22:16.296123 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerDied","Data":"da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454"} Nov 26 16:22:16 crc kubenswrapper[5010]: I1126 16:22:16.296392 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerStarted","Data":"303a760ff9912a0399f4a84bd45bde048f1188889dad98088331e9579fc734c2"} Nov 26 16:22:16 crc kubenswrapper[5010]: I1126 16:22:16.299758 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:22:17 crc kubenswrapper[5010]: I1126 16:22:17.306331 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerStarted","Data":"1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79"} Nov 26 16:22:18 crc kubenswrapper[5010]: I1126 16:22:18.319094 5010 generic.go:334] "Generic (PLEG): container finished" podID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerID="1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79" exitCode=0 Nov 26 16:22:18 crc kubenswrapper[5010]: I1126 16:22:18.319157 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerDied","Data":"1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79"} Nov 26 16:22:19 crc kubenswrapper[5010]: I1126 16:22:19.326996 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerStarted","Data":"af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac"} Nov 26 16:22:19 crc kubenswrapper[5010]: I1126 16:22:19.351848 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8x7gh" podStartSLOduration=2.752047668 podStartE2EDuration="5.351829867s" podCreationTimestamp="2025-11-26 16:22:14 +0000 UTC" firstStartedPulling="2025-11-26 16:22:16.299135151 +0000 UTC m=+3357.089852339" lastFinishedPulling="2025-11-26 16:22:18.89891738 +0000 UTC m=+3359.689634538" observedRunningTime="2025-11-26 16:22:19.346952985 +0000 UTC m=+3360.137670153" watchObservedRunningTime="2025-11-26 16:22:19.351829867 +0000 UTC m=+3360.142547035" Nov 26 16:22:25 crc kubenswrapper[5010]: I1126 16:22:25.126404 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:25 crc kubenswrapper[5010]: I1126 16:22:25.127044 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:25 crc kubenswrapper[5010]: I1126 16:22:25.179083 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:25 crc kubenswrapper[5010]: I1126 16:22:25.421698 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:25 crc kubenswrapper[5010]: I1126 16:22:25.461553 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8x7gh"] Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.396879 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8x7gh" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="registry-server" containerID="cri-o://af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac" gracePeriod=2 Nov 26 16:22:27 crc kubenswrapper[5010]: E1126 16:22:27.509192 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6ec784f4_1063_47ad_93fe_d69df711a6ad.slice/crio-af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.778626 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.934928 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6lxh\" (UniqueName: \"kubernetes.io/projected/6ec784f4-1063-47ad-93fe-d69df711a6ad-kube-api-access-p6lxh\") pod \"6ec784f4-1063-47ad-93fe-d69df711a6ad\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.935046 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-catalog-content\") pod \"6ec784f4-1063-47ad-93fe-d69df711a6ad\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.935122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-utilities\") pod \"6ec784f4-1063-47ad-93fe-d69df711a6ad\" (UID: \"6ec784f4-1063-47ad-93fe-d69df711a6ad\") " Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.937147 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-utilities" (OuterVolumeSpecName: "utilities") pod "6ec784f4-1063-47ad-93fe-d69df711a6ad" (UID: "6ec784f4-1063-47ad-93fe-d69df711a6ad"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:22:27 crc kubenswrapper[5010]: I1126 16:22:27.942190 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ec784f4-1063-47ad-93fe-d69df711a6ad-kube-api-access-p6lxh" (OuterVolumeSpecName: "kube-api-access-p6lxh") pod "6ec784f4-1063-47ad-93fe-d69df711a6ad" (UID: "6ec784f4-1063-47ad-93fe-d69df711a6ad"). InnerVolumeSpecName "kube-api-access-p6lxh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.037872 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.037909 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6lxh\" (UniqueName: \"kubernetes.io/projected/6ec784f4-1063-47ad-93fe-d69df711a6ad-kube-api-access-p6lxh\") on node \"crc\" DevicePath \"\"" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.409202 5010 generic.go:334] "Generic (PLEG): container finished" podID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerID="af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac" exitCode=0 Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.409281 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerDied","Data":"af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac"} Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.409380 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8x7gh" event={"ID":"6ec784f4-1063-47ad-93fe-d69df711a6ad","Type":"ContainerDied","Data":"303a760ff9912a0399f4a84bd45bde048f1188889dad98088331e9579fc734c2"} Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.409421 5010 scope.go:117] "RemoveContainer" containerID="af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.409308 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8x7gh" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.432541 5010 scope.go:117] "RemoveContainer" containerID="1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.450420 5010 scope.go:117] "RemoveContainer" containerID="da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.505230 5010 scope.go:117] "RemoveContainer" containerID="af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac" Nov 26 16:22:28 crc kubenswrapper[5010]: E1126 16:22:28.505791 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac\": container with ID starting with af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac not found: ID does not exist" containerID="af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.505834 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac"} err="failed to get container status \"af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac\": rpc error: code = NotFound desc = could not find container \"af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac\": container with ID starting with af3a5419aaf6656116c8a3e7b4b6393de84b10aecb1dd521aa6f0750c0d093ac not found: ID does not exist" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.505858 5010 scope.go:117] "RemoveContainer" containerID="1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79" Nov 26 16:22:28 crc kubenswrapper[5010]: E1126 16:22:28.506202 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79\": container with ID starting with 1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79 not found: ID does not exist" containerID="1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.506230 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79"} err="failed to get container status \"1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79\": rpc error: code = NotFound desc = could not find container \"1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79\": container with ID starting with 1212ca796b3af7ea65179ad5e3abe931f5327c37b92b9c66695f4dec2201eb79 not found: ID does not exist" Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.506249 5010 scope.go:117] "RemoveContainer" containerID="da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454" Nov 26 16:22:28 crc kubenswrapper[5010]: E1126 16:22:28.506550 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454\": container with ID starting with da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454 not found: ID does not exist" containerID="da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454" 
Nov 26 16:22:28 crc kubenswrapper[5010]: I1126 16:22:28.506573 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454"} err="failed to get container status \"da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454\": rpc error: code = NotFound desc = could not find container \"da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454\": container with ID starting with da6ac95651d7613ceb5300d91b5a050a37a6f66f888ca51b966399f0add72454 not found: ID does not exist" Nov 26 16:22:29 crc kubenswrapper[5010]: I1126 16:22:29.669269 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6ec784f4-1063-47ad-93fe-d69df711a6ad" (UID: "6ec784f4-1063-47ad-93fe-d69df711a6ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:22:29 crc kubenswrapper[5010]: I1126 16:22:29.763933 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6ec784f4-1063-47ad-93fe-d69df711a6ad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:22:29 crc kubenswrapper[5010]: I1126 16:22:29.964424 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8x7gh"] Nov 26 16:22:29 crc kubenswrapper[5010]: I1126 16:22:29.977304 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8x7gh"] Nov 26 16:22:31 crc kubenswrapper[5010]: I1126 16:22:31.900993 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" path="/var/lib/kubelet/pods/6ec784f4-1063-47ad-93fe-d69df711a6ad/volumes" Nov 26 16:22:41 crc kubenswrapper[5010]: I1126 16:22:41.422996 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:22:41 crc kubenswrapper[5010]: I1126 16:22:41.423819 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.423084 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.423701 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.423810 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.424865 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"19d41a574349f70822f552c86d7cd5e4d8943054274493eef9c7c965cd37ca2a"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.424965 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://19d41a574349f70822f552c86d7cd5e4d8943054274493eef9c7c965cd37ca2a" gracePeriod=600 Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.841544 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="19d41a574349f70822f552c86d7cd5e4d8943054274493eef9c7c965cd37ca2a" exitCode=0 Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.841842 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"19d41a574349f70822f552c86d7cd5e4d8943054274493eef9c7c965cd37ca2a"} Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.841974 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82"} Nov 26 16:23:11 crc kubenswrapper[5010]: I1126 16:23:11.842002 5010 scope.go:117] "RemoveContainer" containerID="e2a42a63bb757d0ec4ca3045471a01489285796d065a954e7b5a97c94b4e7b20" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.086607 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:23:44 crc kubenswrapper[5010]: E1126 16:23:44.087492 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="extract-content" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.087505 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="extract-content" Nov 26 16:23:44 crc kubenswrapper[5010]: E1126 16:23:44.087525 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="registry-server" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.087533 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="registry-server" Nov 26 16:23:44 crc kubenswrapper[5010]: E1126 16:23:44.087542 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="extract-utilities" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.087548 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" containerName="extract-utilities" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.087779 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ec784f4-1063-47ad-93fe-d69df711a6ad" 
containerName="registry-server" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.088758 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.107257 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.246750 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-utilities\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.246906 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gm8b\" (UniqueName: \"kubernetes.io/projected/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-kube-api-access-9gm8b\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.247058 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-catalog-content\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.348533 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-utilities\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.348623 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gm8b\" (UniqueName: \"kubernetes.io/projected/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-kube-api-access-9gm8b\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.348743 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-catalog-content\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.349799 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-catalog-content\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.349875 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-utilities\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " 
pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.375578 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gm8b\" (UniqueName: \"kubernetes.io/projected/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-kube-api-access-9gm8b\") pod \"certified-operators-4mtn9\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.410890 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:44 crc kubenswrapper[5010]: I1126 16:23:44.906029 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:23:45 crc kubenswrapper[5010]: I1126 16:23:45.189140 5010 generic.go:334] "Generic (PLEG): container finished" podID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerID="241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759" exitCode=0 Nov 26 16:23:45 crc kubenswrapper[5010]: I1126 16:23:45.189248 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerDied","Data":"241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759"} Nov 26 16:23:45 crc kubenswrapper[5010]: I1126 16:23:45.189374 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerStarted","Data":"5a85ced1d63bd1c6936764f3e7a7d8418adbba5bd741a0dea9a9f55480f39067"} Nov 26 16:23:49 crc kubenswrapper[5010]: I1126 16:23:49.231092 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerStarted","Data":"5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857"} Nov 26 16:23:50 crc kubenswrapper[5010]: I1126 16:23:50.246289 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerDied","Data":"5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857"} Nov 26 16:23:50 crc kubenswrapper[5010]: I1126 16:23:50.246117 5010 generic.go:334] "Generic (PLEG): container finished" podID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerID="5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857" exitCode=0 Nov 26 16:23:51 crc kubenswrapper[5010]: I1126 16:23:51.262271 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerStarted","Data":"8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255"} Nov 26 16:23:51 crc kubenswrapper[5010]: I1126 16:23:51.298025 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4mtn9" podStartSLOduration=1.8515473519999999 podStartE2EDuration="7.298006098s" podCreationTimestamp="2025-11-26 16:23:44 +0000 UTC" firstStartedPulling="2025-11-26 16:23:45.191472161 +0000 UTC m=+3445.982189309" lastFinishedPulling="2025-11-26 16:23:50.637930907 +0000 UTC m=+3451.428648055" observedRunningTime="2025-11-26 16:23:51.290506151 +0000 UTC m=+3452.081223319" watchObservedRunningTime="2025-11-26 
16:23:51.298006098 +0000 UTC m=+3452.088723256" Nov 26 16:23:54 crc kubenswrapper[5010]: I1126 16:23:54.411959 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:54 crc kubenswrapper[5010]: I1126 16:23:54.412933 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:54 crc kubenswrapper[5010]: I1126 16:23:54.469612 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:55 crc kubenswrapper[5010]: I1126 16:23:55.355208 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:23:55 crc kubenswrapper[5010]: I1126 16:23:55.453160 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:23:55 crc kubenswrapper[5010]: I1126 16:23:55.536526 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:23:55 crc kubenswrapper[5010]: I1126 16:23:55.536801 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ds8nd" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="registry-server" containerID="cri-o://3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0" gracePeriod=2 Nov 26 16:23:55 crc kubenswrapper[5010]: I1126 16:23:55.930452 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.019351 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-catalog-content\") pod \"88f9e25d-4585-47b4-b5f0-95a1e6866742\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.019454 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-utilities\") pod \"88f9e25d-4585-47b4-b5f0-95a1e6866742\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.019586 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rfzqt\" (UniqueName: \"kubernetes.io/projected/88f9e25d-4585-47b4-b5f0-95a1e6866742-kube-api-access-rfzqt\") pod \"88f9e25d-4585-47b4-b5f0-95a1e6866742\" (UID: \"88f9e25d-4585-47b4-b5f0-95a1e6866742\") " Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.020330 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-utilities" (OuterVolumeSpecName: "utilities") pod "88f9e25d-4585-47b4-b5f0-95a1e6866742" (UID: "88f9e25d-4585-47b4-b5f0-95a1e6866742"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.027919 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88f9e25d-4585-47b4-b5f0-95a1e6866742-kube-api-access-rfzqt" (OuterVolumeSpecName: "kube-api-access-rfzqt") pod "88f9e25d-4585-47b4-b5f0-95a1e6866742" (UID: "88f9e25d-4585-47b4-b5f0-95a1e6866742"). InnerVolumeSpecName "kube-api-access-rfzqt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.077439 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "88f9e25d-4585-47b4-b5f0-95a1e6866742" (UID: "88f9e25d-4585-47b4-b5f0-95a1e6866742"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.121854 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.121897 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/88f9e25d-4585-47b4-b5f0-95a1e6866742-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.121912 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rfzqt\" (UniqueName: \"kubernetes.io/projected/88f9e25d-4585-47b4-b5f0-95a1e6866742-kube-api-access-rfzqt\") on node \"crc\" DevicePath \"\"" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.306766 5010 generic.go:334] "Generic (PLEG): container finished" podID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerID="3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0" exitCode=0 Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.306848 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ds8nd" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.306853 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerDied","Data":"3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0"} Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.306973 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ds8nd" event={"ID":"88f9e25d-4585-47b4-b5f0-95a1e6866742","Type":"ContainerDied","Data":"1264fe8e8294963b77a83da4047c36fcee78d4b72719166979b91086b4093402"} Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.306991 5010 scope.go:117] "RemoveContainer" containerID="3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.326073 5010 scope.go:117] "RemoveContainer" containerID="9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.339320 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.345749 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ds8nd"] Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.360474 5010 scope.go:117] "RemoveContainer" containerID="5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.377402 5010 scope.go:117] "RemoveContainer" containerID="3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0" Nov 26 16:23:56 crc kubenswrapper[5010]: E1126 16:23:56.377939 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0\": container with ID starting with 3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0 not found: ID does not exist" containerID="3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.377969 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0"} err="failed to get container status \"3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0\": rpc error: code = NotFound desc = could not find container \"3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0\": container with ID starting with 3449511d789bedd23f54b02f4d650168527fda7414cbb86ed4c9037d9513d0e0 not found: ID does not exist" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.377994 5010 scope.go:117] "RemoveContainer" containerID="9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86" Nov 26 16:23:56 crc kubenswrapper[5010]: E1126 16:23:56.378336 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86\": container with ID starting with 9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86 not found: ID does not exist" containerID="9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.378393 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86"} err="failed to get container status \"9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86\": rpc error: code = NotFound desc = could not find container \"9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86\": container with ID starting with 9262b2f1988e406c306610663ac61197fdc4de5f656c35496701932de78ade86 not found: ID does not exist" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.378426 5010 scope.go:117] "RemoveContainer" containerID="5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755" Nov 26 16:23:56 crc kubenswrapper[5010]: E1126 16:23:56.378902 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755\": container with ID starting with 5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755 not found: ID does not exist" containerID="5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755" Nov 26 16:23:56 crc kubenswrapper[5010]: I1126 16:23:56.378927 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755"} err="failed to get container status \"5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755\": rpc error: code = NotFound desc = could not find container \"5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755\": container with ID starting with 5a250fb18332f04d46bd15159b63dfa1ffd598a074645c405a64265c9b54f755 not found: ID does not exist" Nov 26 16:23:57 crc kubenswrapper[5010]: I1126 16:23:57.900816 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" path="/var/lib/kubelet/pods/88f9e25d-4585-47b4-b5f0-95a1e6866742/volumes" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.795981 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jg6ff"] Nov 26 16:24:22 crc kubenswrapper[5010]: E1126 16:24:22.802777 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="extract-content" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.803092 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="extract-content" Nov 26 16:24:22 crc kubenswrapper[5010]: E1126 16:24:22.803238 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="extract-utilities" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.803361 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="extract-utilities" Nov 26 16:24:22 crc kubenswrapper[5010]: E1126 16:24:22.803458 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="registry-server" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.803541 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" containerName="registry-server" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.803911 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="88f9e25d-4585-47b4-b5f0-95a1e6866742" 
containerName="registry-server" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.805181 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.825559 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-catalog-content\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.825617 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkkhs\" (UniqueName: \"kubernetes.io/projected/571c02a0-d04a-42f4-a1d2-ee8d45358baa-kube-api-access-vkkhs\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.825674 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-utilities\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.833207 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg6ff"] Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.927299 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-utilities\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.927802 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-catalog-content\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.927985 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkkhs\" (UniqueName: \"kubernetes.io/projected/571c02a0-d04a-42f4-a1d2-ee8d45358baa-kube-api-access-vkkhs\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.928219 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-utilities\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.929049 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-catalog-content\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " 
pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:22 crc kubenswrapper[5010]: I1126 16:24:22.950590 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkkhs\" (UniqueName: \"kubernetes.io/projected/571c02a0-d04a-42f4-a1d2-ee8d45358baa-kube-api-access-vkkhs\") pod \"redhat-marketplace-jg6ff\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:23 crc kubenswrapper[5010]: I1126 16:24:23.138288 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:23 crc kubenswrapper[5010]: I1126 16:24:23.635783 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg6ff"] Nov 26 16:24:24 crc kubenswrapper[5010]: I1126 16:24:24.580799 5010 generic.go:334] "Generic (PLEG): container finished" podID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerID="dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1" exitCode=0 Nov 26 16:24:24 crc kubenswrapper[5010]: I1126 16:24:24.581008 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg6ff" event={"ID":"571c02a0-d04a-42f4-a1d2-ee8d45358baa","Type":"ContainerDied","Data":"dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1"} Nov 26 16:24:24 crc kubenswrapper[5010]: I1126 16:24:24.581126 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg6ff" event={"ID":"571c02a0-d04a-42f4-a1d2-ee8d45358baa","Type":"ContainerStarted","Data":"156c41efec4a3e831f5158dcc70951e0115e74d6ef2e0222a67316a2c2c1dabe"} Nov 26 16:24:26 crc kubenswrapper[5010]: I1126 16:24:26.601108 5010 generic.go:334] "Generic (PLEG): container finished" podID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerID="3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e" exitCode=0 Nov 26 16:24:26 crc kubenswrapper[5010]: I1126 16:24:26.601264 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg6ff" event={"ID":"571c02a0-d04a-42f4-a1d2-ee8d45358baa","Type":"ContainerDied","Data":"3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e"} Nov 26 16:24:27 crc kubenswrapper[5010]: I1126 16:24:27.611809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg6ff" event={"ID":"571c02a0-d04a-42f4-a1d2-ee8d45358baa","Type":"ContainerStarted","Data":"3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35"} Nov 26 16:24:27 crc kubenswrapper[5010]: I1126 16:24:27.637220 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-jg6ff" podStartSLOduration=3.167618043 podStartE2EDuration="5.637203786s" podCreationTimestamp="2025-11-26 16:24:22 +0000 UTC" firstStartedPulling="2025-11-26 16:24:24.584342009 +0000 UTC m=+3485.375059197" lastFinishedPulling="2025-11-26 16:24:27.053927792 +0000 UTC m=+3487.844644940" observedRunningTime="2025-11-26 16:24:27.635649348 +0000 UTC m=+3488.426366526" watchObservedRunningTime="2025-11-26 16:24:27.637203786 +0000 UTC m=+3488.427920934" Nov 26 16:24:33 crc kubenswrapper[5010]: I1126 16:24:33.138794 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:33 crc kubenswrapper[5010]: I1126 16:24:33.139343 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:33 crc kubenswrapper[5010]: I1126 16:24:33.192855 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:33 crc kubenswrapper[5010]: I1126 16:24:33.715809 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:33 crc kubenswrapper[5010]: I1126 16:24:33.772332 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg6ff"] Nov 26 16:24:35 crc kubenswrapper[5010]: I1126 16:24:35.681219 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jg6ff" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="registry-server" containerID="cri-o://3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35" gracePeriod=2 Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.135522 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.281867 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkkhs\" (UniqueName: \"kubernetes.io/projected/571c02a0-d04a-42f4-a1d2-ee8d45358baa-kube-api-access-vkkhs\") pod \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.281949 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-catalog-content\") pod \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.281985 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-utilities\") pod \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\" (UID: \"571c02a0-d04a-42f4-a1d2-ee8d45358baa\") " Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.283420 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-utilities" (OuterVolumeSpecName: "utilities") pod "571c02a0-d04a-42f4-a1d2-ee8d45358baa" (UID: "571c02a0-d04a-42f4-a1d2-ee8d45358baa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.289624 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/571c02a0-d04a-42f4-a1d2-ee8d45358baa-kube-api-access-vkkhs" (OuterVolumeSpecName: "kube-api-access-vkkhs") pod "571c02a0-d04a-42f4-a1d2-ee8d45358baa" (UID: "571c02a0-d04a-42f4-a1d2-ee8d45358baa"). InnerVolumeSpecName "kube-api-access-vkkhs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.299888 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "571c02a0-d04a-42f4-a1d2-ee8d45358baa" (UID: "571c02a0-d04a-42f4-a1d2-ee8d45358baa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.384390 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkkhs\" (UniqueName: \"kubernetes.io/projected/571c02a0-d04a-42f4-a1d2-ee8d45358baa-kube-api-access-vkkhs\") on node \"crc\" DevicePath \"\"" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.384443 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.384461 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/571c02a0-d04a-42f4-a1d2-ee8d45358baa-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.692808 5010 generic.go:334] "Generic (PLEG): container finished" podID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerID="3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35" exitCode=0 Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.692871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg6ff" event={"ID":"571c02a0-d04a-42f4-a1d2-ee8d45358baa","Type":"ContainerDied","Data":"3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35"} Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.692960 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jg6ff" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.692981 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jg6ff" event={"ID":"571c02a0-d04a-42f4-a1d2-ee8d45358baa","Type":"ContainerDied","Data":"156c41efec4a3e831f5158dcc70951e0115e74d6ef2e0222a67316a2c2c1dabe"} Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.693028 5010 scope.go:117] "RemoveContainer" containerID="3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.730615 5010 scope.go:117] "RemoveContainer" containerID="3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.751555 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg6ff"] Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.757484 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jg6ff"] Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.766500 5010 scope.go:117] "RemoveContainer" containerID="dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.792381 5010 scope.go:117] "RemoveContainer" containerID="3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35" Nov 26 16:24:36 crc kubenswrapper[5010]: E1126 16:24:36.793072 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35\": container with ID starting with 3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35 not found: ID does not exist" containerID="3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.793115 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35"} err="failed to get container status \"3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35\": rpc error: code = NotFound desc = could not find container \"3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35\": container with ID starting with 3c9a20223b6dd7287cac7f489df12750042efe0aad0c6fae19dd7bb7d9490e35 not found: ID does not exist" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.793142 5010 scope.go:117] "RemoveContainer" containerID="3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e" Nov 26 16:24:36 crc kubenswrapper[5010]: E1126 16:24:36.793769 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e\": container with ID starting with 3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e not found: ID does not exist" containerID="3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.793817 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e"} err="failed to get container status \"3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e\": rpc error: code = NotFound desc = could not find container \"3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e\": container with ID starting with 3943a7734e193ec3af4ac46f11ebab20ceb2e520ff8412c12bc191d1ea9b803e not found: ID does not exist" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.793847 5010 scope.go:117] "RemoveContainer" containerID="dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1" Nov 26 16:24:36 crc kubenswrapper[5010]: E1126 16:24:36.794299 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1\": container with ID starting with dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1 not found: ID does not exist" containerID="dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1" Nov 26 16:24:36 crc kubenswrapper[5010]: I1126 16:24:36.794361 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1"} err="failed to get container status \"dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1\": rpc error: code = NotFound desc = could not find container \"dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1\": container with ID starting with dc63ece605a3fdbecdbf2db95418762467df8be08c13bb3d1afb7db7ef875be1 not found: ID does not exist" Nov 26 16:24:37 crc kubenswrapper[5010]: I1126 16:24:37.902449 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" path="/var/lib/kubelet/pods/571c02a0-d04a-42f4-a1d2-ee8d45358baa/volumes" Nov 26 16:25:11 crc kubenswrapper[5010]: I1126 16:25:11.423294 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:25:11 crc kubenswrapper[5010]: I1126 16:25:11.423893 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:25:41 crc kubenswrapper[5010]: I1126 16:25:41.422319 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:25:41 crc kubenswrapper[5010]: I1126 16:25:41.423004 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.422964 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.423652 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.423743 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.424602 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.424745 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" gracePeriod=600 Nov 26 16:26:11 crc kubenswrapper[5010]: E1126 16:26:11.547542 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.640676 5010 
generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" exitCode=0 Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.640741 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82"} Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.640780 5010 scope.go:117] "RemoveContainer" containerID="19d41a574349f70822f552c86d7cd5e4d8943054274493eef9c7c965cd37ca2a" Nov 26 16:26:11 crc kubenswrapper[5010]: I1126 16:26:11.641561 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:26:11 crc kubenswrapper[5010]: E1126 16:26:11.642986 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:26:24 crc kubenswrapper[5010]: I1126 16:26:24.892094 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:26:24 crc kubenswrapper[5010]: E1126 16:26:24.892760 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:26:36 crc kubenswrapper[5010]: I1126 16:26:36.891623 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:26:36 crc kubenswrapper[5010]: E1126 16:26:36.892745 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:26:49 crc kubenswrapper[5010]: I1126 16:26:49.897581 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:26:49 crc kubenswrapper[5010]: E1126 16:26:49.898605 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:27:03 crc kubenswrapper[5010]: I1126 16:27:03.892154 5010 scope.go:117] "RemoveContainer" 
containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:27:03 crc kubenswrapper[5010]: E1126 16:27:03.893126 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:27:14 crc kubenswrapper[5010]: I1126 16:27:14.892581 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:27:14 crc kubenswrapper[5010]: E1126 16:27:14.894192 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:27:28 crc kubenswrapper[5010]: I1126 16:27:28.892069 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:27:28 crc kubenswrapper[5010]: E1126 16:27:28.892908 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:27:43 crc kubenswrapper[5010]: I1126 16:27:43.892344 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:27:43 crc kubenswrapper[5010]: E1126 16:27:43.893212 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.050326 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-zfgm4"] Nov 26 16:27:53 crc kubenswrapper[5010]: E1126 16:27:53.051371 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="registry-server" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.051388 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="registry-server" Nov 26 16:27:53 crc kubenswrapper[5010]: E1126 16:27:53.051405 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="extract-content" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.051413 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="extract-content" 
Nov 26 16:27:53 crc kubenswrapper[5010]: E1126 16:27:53.051427 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="extract-utilities" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.051436 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="extract-utilities" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.051633 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="571c02a0-d04a-42f4-a1d2-ee8d45358baa" containerName="registry-server" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.053071 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.064770 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zfgm4"] Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.155371 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-catalog-content\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.155721 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-49tq2\" (UniqueName: \"kubernetes.io/projected/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-kube-api-access-49tq2\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.155758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-utilities\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.256294 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-49tq2\" (UniqueName: \"kubernetes.io/projected/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-kube-api-access-49tq2\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.256341 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-utilities\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.256416 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-catalog-content\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.256807 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-catalog-content\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.257028 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-utilities\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.282698 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-49tq2\" (UniqueName: \"kubernetes.io/projected/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-kube-api-access-49tq2\") pod \"community-operators-zfgm4\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.386670 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:27:53 crc kubenswrapper[5010]: I1126 16:27:53.924513 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-zfgm4"] Nov 26 16:27:53 crc kubenswrapper[5010]: W1126 16:27:53.930751 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ccd5b3d_09fd_4d84_8126_1785dea5d7e0.slice/crio-2f3484ebe35334e2f942c24ea4b8843d6e5101bd205a6570dd76af6e6b9a1979 WatchSource:0}: Error finding container 2f3484ebe35334e2f942c24ea4b8843d6e5101bd205a6570dd76af6e6b9a1979: Status 404 returned error can't find the container with id 2f3484ebe35334e2f942c24ea4b8843d6e5101bd205a6570dd76af6e6b9a1979 Nov 26 16:27:54 crc kubenswrapper[5010]: E1126 16:27:54.236433 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ccd5b3d_09fd_4d84_8126_1785dea5d7e0.slice/crio-c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:27:54 crc kubenswrapper[5010]: I1126 16:27:54.558419 5010 generic.go:334] "Generic (PLEG): container finished" podID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerID="c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf" exitCode=0 Nov 26 16:27:54 crc kubenswrapper[5010]: I1126 16:27:54.558481 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerDied","Data":"c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf"} Nov 26 16:27:54 crc kubenswrapper[5010]: I1126 16:27:54.558518 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerStarted","Data":"2f3484ebe35334e2f942c24ea4b8843d6e5101bd205a6570dd76af6e6b9a1979"} Nov 26 16:27:54 crc kubenswrapper[5010]: I1126 16:27:54.562181 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:27:54 crc kubenswrapper[5010]: I1126 16:27:54.891159 5010 scope.go:117] "RemoveContainer" 
containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:27:54 crc kubenswrapper[5010]: E1126 16:27:54.891650 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:27:55 crc kubenswrapper[5010]: I1126 16:27:55.567584 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerStarted","Data":"6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7"} Nov 26 16:27:56 crc kubenswrapper[5010]: I1126 16:27:56.580430 5010 generic.go:334] "Generic (PLEG): container finished" podID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerID="6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7" exitCode=0 Nov 26 16:27:56 crc kubenswrapper[5010]: I1126 16:27:56.580493 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerDied","Data":"6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7"} Nov 26 16:27:57 crc kubenswrapper[5010]: I1126 16:27:57.593167 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerStarted","Data":"83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d"} Nov 26 16:27:57 crc kubenswrapper[5010]: I1126 16:27:57.619283 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-zfgm4" podStartSLOduration=2.075119271 podStartE2EDuration="4.619264887s" podCreationTimestamp="2025-11-26 16:27:53 +0000 UTC" firstStartedPulling="2025-11-26 16:27:54.561778983 +0000 UTC m=+3695.352496171" lastFinishedPulling="2025-11-26 16:27:57.105924629 +0000 UTC m=+3697.896641787" observedRunningTime="2025-11-26 16:27:57.614290103 +0000 UTC m=+3698.405007251" watchObservedRunningTime="2025-11-26 16:27:57.619264887 +0000 UTC m=+3698.409982035" Nov 26 16:28:03 crc kubenswrapper[5010]: I1126 16:28:03.387764 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:28:03 crc kubenswrapper[5010]: I1126 16:28:03.388021 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:28:03 crc kubenswrapper[5010]: I1126 16:28:03.463533 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:28:03 crc kubenswrapper[5010]: I1126 16:28:03.733687 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:28:03 crc kubenswrapper[5010]: I1126 16:28:03.791152 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zfgm4"] Nov 26 16:28:05 crc kubenswrapper[5010]: I1126 16:28:05.658170 5010 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/community-operators-zfgm4" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="registry-server" containerID="cri-o://83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d" gracePeriod=2 Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.145064 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.262680 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-utilities\") pod \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.262831 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-49tq2\" (UniqueName: \"kubernetes.io/projected/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-kube-api-access-49tq2\") pod \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.262858 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-catalog-content\") pod \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\" (UID: \"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0\") " Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.263627 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-utilities" (OuterVolumeSpecName: "utilities") pod "8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" (UID: "8ccd5b3d-09fd-4d84-8126-1785dea5d7e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.269181 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-kube-api-access-49tq2" (OuterVolumeSpecName: "kube-api-access-49tq2") pod "8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" (UID: "8ccd5b3d-09fd-4d84-8126-1785dea5d7e0"). InnerVolumeSpecName "kube-api-access-49tq2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.326117 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" (UID: "8ccd5b3d-09fd-4d84-8126-1785dea5d7e0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.364992 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.365060 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-49tq2\" (UniqueName: \"kubernetes.io/projected/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-kube-api-access-49tq2\") on node \"crc\" DevicePath \"\"" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.365077 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.667823 5010 generic.go:334] "Generic (PLEG): container finished" podID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerID="83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d" exitCode=0 Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.667861 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerDied","Data":"83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d"} Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.667886 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-zfgm4" event={"ID":"8ccd5b3d-09fd-4d84-8126-1785dea5d7e0","Type":"ContainerDied","Data":"2f3484ebe35334e2f942c24ea4b8843d6e5101bd205a6570dd76af6e6b9a1979"} Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.667912 5010 scope.go:117] "RemoveContainer" containerID="83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.668024 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-zfgm4" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.692933 5010 scope.go:117] "RemoveContainer" containerID="6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.712780 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-zfgm4"] Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.718794 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-zfgm4"] Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.744777 5010 scope.go:117] "RemoveContainer" containerID="c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.760533 5010 scope.go:117] "RemoveContainer" containerID="83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d" Nov 26 16:28:06 crc kubenswrapper[5010]: E1126 16:28:06.760998 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d\": container with ID starting with 83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d not found: ID does not exist" containerID="83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.761055 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d"} err="failed to get container status \"83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d\": rpc error: code = NotFound desc = could not find container \"83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d\": container with ID starting with 83d640fc8cb8e96fd5ea4ebb4f57706b7c21ae2b50c34791235a44d49534143d not found: ID does not exist" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.761092 5010 scope.go:117] "RemoveContainer" containerID="6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7" Nov 26 16:28:06 crc kubenswrapper[5010]: E1126 16:28:06.761534 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7\": container with ID starting with 6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7 not found: ID does not exist" containerID="6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.761575 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7"} err="failed to get container status \"6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7\": rpc error: code = NotFound desc = could not find container \"6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7\": container with ID starting with 6365b89060d4df57c54511847620485a6c9231c9887136bc438ad8fc6dc7f8f7 not found: ID does not exist" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.761602 5010 scope.go:117] "RemoveContainer" containerID="c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf" Nov 26 16:28:06 crc kubenswrapper[5010]: E1126 16:28:06.761920 5010 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf\": container with ID starting with c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf not found: ID does not exist" containerID="c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.761961 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf"} err="failed to get container status \"c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf\": rpc error: code = NotFound desc = could not find container \"c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf\": container with ID starting with c95dbf09e78b7b49dcb14fb7d47cec7e13b8290fb5f138b4e468e7bbd7be9ccf not found: ID does not exist" Nov 26 16:28:06 crc kubenswrapper[5010]: I1126 16:28:06.891653 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:28:06 crc kubenswrapper[5010]: E1126 16:28:06.892259 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:28:07 crc kubenswrapper[5010]: I1126 16:28:07.902759 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" path="/var/lib/kubelet/pods/8ccd5b3d-09fd-4d84-8126-1785dea5d7e0/volumes" Nov 26 16:28:21 crc kubenswrapper[5010]: I1126 16:28:21.891462 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:28:21 crc kubenswrapper[5010]: E1126 16:28:21.892694 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:28:32 crc kubenswrapper[5010]: I1126 16:28:32.891432 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:28:32 crc kubenswrapper[5010]: E1126 16:28:32.892186 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:28:47 crc kubenswrapper[5010]: I1126 16:28:47.892370 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:28:47 crc kubenswrapper[5010]: E1126 16:28:47.893096 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:28:59 crc kubenswrapper[5010]: I1126 16:28:59.902811 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:28:59 crc kubenswrapper[5010]: E1126 16:28:59.903979 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:29:13 crc kubenswrapper[5010]: I1126 16:29:13.892637 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:29:13 crc kubenswrapper[5010]: E1126 16:29:13.894099 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:29:24 crc kubenswrapper[5010]: I1126 16:29:24.891154 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:29:24 crc kubenswrapper[5010]: E1126 16:29:24.892130 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:29:37 crc kubenswrapper[5010]: I1126 16:29:37.891773 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:29:37 crc kubenswrapper[5010]: E1126 16:29:37.893049 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:29:51 crc kubenswrapper[5010]: I1126 16:29:51.891335 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:29:51 crc kubenswrapper[5010]: E1126 16:29:51.893103 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.157398 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5"] Nov 26 16:30:00 crc kubenswrapper[5010]: E1126 16:30:00.159269 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="registry-server" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.159371 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="registry-server" Nov 26 16:30:00 crc kubenswrapper[5010]: E1126 16:30:00.159444 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="extract-utilities" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.159528 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="extract-utilities" Nov 26 16:30:00 crc kubenswrapper[5010]: E1126 16:30:00.159632 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="extract-content" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.159805 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="extract-content" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.160044 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ccd5b3d-09fd-4d84-8126-1785dea5d7e0" containerName="registry-server" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.162073 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.164560 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.165081 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.173741 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5"] Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.250576 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d320b698-2ff1-407a-9659-75e46bb26aec-config-volume\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.250622 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d320b698-2ff1-407a-9659-75e46bb26aec-secret-volume\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.250682 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdn4n\" (UniqueName: \"kubernetes.io/projected/d320b698-2ff1-407a-9659-75e46bb26aec-kube-api-access-xdn4n\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.351572 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d320b698-2ff1-407a-9659-75e46bb26aec-secret-volume\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.351633 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdn4n\" (UniqueName: \"kubernetes.io/projected/d320b698-2ff1-407a-9659-75e46bb26aec-kube-api-access-xdn4n\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.351692 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d320b698-2ff1-407a-9659-75e46bb26aec-config-volume\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.352456 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d320b698-2ff1-407a-9659-75e46bb26aec-config-volume\") pod 
\"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.357953 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d320b698-2ff1-407a-9659-75e46bb26aec-secret-volume\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.367306 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdn4n\" (UniqueName: \"kubernetes.io/projected/d320b698-2ff1-407a-9659-75e46bb26aec-kube-api-access-xdn4n\") pod \"collect-profiles-29402910-z2rv5\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.496658 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:00 crc kubenswrapper[5010]: I1126 16:30:00.972834 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5"] Nov 26 16:30:01 crc kubenswrapper[5010]: I1126 16:30:01.713004 5010 generic.go:334] "Generic (PLEG): container finished" podID="d320b698-2ff1-407a-9659-75e46bb26aec" containerID="d52b166b66babe1c7de3112556ec64b4c42b803e0f1b9cf1a79ba27e870df93e" exitCode=0 Nov 26 16:30:01 crc kubenswrapper[5010]: I1126 16:30:01.713379 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" event={"ID":"d320b698-2ff1-407a-9659-75e46bb26aec","Type":"ContainerDied","Data":"d52b166b66babe1c7de3112556ec64b4c42b803e0f1b9cf1a79ba27e870df93e"} Nov 26 16:30:01 crc kubenswrapper[5010]: I1126 16:30:01.713422 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" event={"ID":"d320b698-2ff1-407a-9659-75e46bb26aec","Type":"ContainerStarted","Data":"61a7e735c356e0092608be3c9fdb972060268b5f3b979a3a1ad2d568e287a59d"} Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.008119 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.191577 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d320b698-2ff1-407a-9659-75e46bb26aec-secret-volume\") pod \"d320b698-2ff1-407a-9659-75e46bb26aec\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.191665 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d320b698-2ff1-407a-9659-75e46bb26aec-config-volume\") pod \"d320b698-2ff1-407a-9659-75e46bb26aec\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.191820 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xdn4n\" (UniqueName: \"kubernetes.io/projected/d320b698-2ff1-407a-9659-75e46bb26aec-kube-api-access-xdn4n\") pod \"d320b698-2ff1-407a-9659-75e46bb26aec\" (UID: \"d320b698-2ff1-407a-9659-75e46bb26aec\") " Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.192860 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d320b698-2ff1-407a-9659-75e46bb26aec-config-volume" (OuterVolumeSpecName: "config-volume") pod "d320b698-2ff1-407a-9659-75e46bb26aec" (UID: "d320b698-2ff1-407a-9659-75e46bb26aec"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.197765 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d320b698-2ff1-407a-9659-75e46bb26aec-kube-api-access-xdn4n" (OuterVolumeSpecName: "kube-api-access-xdn4n") pod "d320b698-2ff1-407a-9659-75e46bb26aec" (UID: "d320b698-2ff1-407a-9659-75e46bb26aec"). InnerVolumeSpecName "kube-api-access-xdn4n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.200853 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d320b698-2ff1-407a-9659-75e46bb26aec-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d320b698-2ff1-407a-9659-75e46bb26aec" (UID: "d320b698-2ff1-407a-9659-75e46bb26aec"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.294195 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d320b698-2ff1-407a-9659-75e46bb26aec-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.294271 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d320b698-2ff1-407a-9659-75e46bb26aec-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.294298 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xdn4n\" (UniqueName: \"kubernetes.io/projected/d320b698-2ff1-407a-9659-75e46bb26aec-kube-api-access-xdn4n\") on node \"crc\" DevicePath \"\"" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.735769 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" event={"ID":"d320b698-2ff1-407a-9659-75e46bb26aec","Type":"ContainerDied","Data":"61a7e735c356e0092608be3c9fdb972060268b5f3b979a3a1ad2d568e287a59d"} Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.735821 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="61a7e735c356e0092608be3c9fdb972060268b5f3b979a3a1ad2d568e287a59d" Nov 26 16:30:03 crc kubenswrapper[5010]: I1126 16:30:03.735850 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5" Nov 26 16:30:04 crc kubenswrapper[5010]: I1126 16:30:04.093966 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl"] Nov 26 16:30:04 crc kubenswrapper[5010]: I1126 16:30:04.105078 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402865-gzsvl"] Nov 26 16:30:05 crc kubenswrapper[5010]: I1126 16:30:05.906118 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f957d9d-bfaf-449a-be3a-a20de204b99b" path="/var/lib/kubelet/pods/9f957d9d-bfaf-449a-be3a-a20de204b99b/volumes" Nov 26 16:30:06 crc kubenswrapper[5010]: I1126 16:30:06.891969 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:30:06 crc kubenswrapper[5010]: E1126 16:30:06.892401 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:30:21 crc kubenswrapper[5010]: I1126 16:30:21.908235 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:30:21 crc kubenswrapper[5010]: E1126 16:30:21.909942 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:30:35 crc kubenswrapper[5010]: I1126 16:30:35.891652 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:30:35 crc kubenswrapper[5010]: E1126 16:30:35.892744 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:30:49 crc kubenswrapper[5010]: I1126 16:30:49.900264 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:30:49 crc kubenswrapper[5010]: E1126 16:30:49.901129 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:31:03 crc kubenswrapper[5010]: I1126 16:31:03.892179 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:31:03 crc kubenswrapper[5010]: E1126 16:31:03.894972 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:31:04 crc kubenswrapper[5010]: I1126 16:31:04.212636 5010 scope.go:117] "RemoveContainer" containerID="63ab4d5dd60774b6439a77917b8e090bf455e9c84964e03e121136838bfd668e" Nov 26 16:31:14 crc kubenswrapper[5010]: I1126 16:31:14.892699 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:31:15 crc kubenswrapper[5010]: I1126 16:31:15.365982 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"ceac9ea4c67a4d52c2e36ff86bd795f4bcf9d6de60b38bef3ccbb46004f4a814"} Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.812523 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wk7dd"] Nov 26 16:32:28 crc kubenswrapper[5010]: E1126 16:32:28.813620 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d320b698-2ff1-407a-9659-75e46bb26aec" containerName="collect-profiles" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.813645 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d320b698-2ff1-407a-9659-75e46bb26aec" containerName="collect-profiles" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.813983 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d320b698-2ff1-407a-9659-75e46bb26aec" containerName="collect-profiles" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.816155 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.838202 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wk7dd"] Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.897010 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-catalog-content\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.897065 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-utilities\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.897125 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w947g\" (UniqueName: \"kubernetes.io/projected/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-kube-api-access-w947g\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.999269 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-catalog-content\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.999369 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-utilities\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:28 crc kubenswrapper[5010]: I1126 16:32:28.999496 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w947g\" (UniqueName: \"kubernetes.io/projected/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-kube-api-access-w947g\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:29 crc kubenswrapper[5010]: I1126 16:32:29.000828 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-catalog-content\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:29 crc kubenswrapper[5010]: I1126 16:32:29.000979 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-utilities\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " 
pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:29 crc kubenswrapper[5010]: I1126 16:32:29.049081 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w947g\" (UniqueName: \"kubernetes.io/projected/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-kube-api-access-w947g\") pod \"redhat-operators-wk7dd\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:29 crc kubenswrapper[5010]: I1126 16:32:29.163945 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:29 crc kubenswrapper[5010]: I1126 16:32:29.601347 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wk7dd"] Nov 26 16:32:29 crc kubenswrapper[5010]: E1126 16:32:29.910034 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cc5cd3b_a32d_4870_8dcb_ba6489d910b6.slice/crio-bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5cc5cd3b_a32d_4870_8dcb_ba6489d910b6.slice/crio-conmon-bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:32:30 crc kubenswrapper[5010]: I1126 16:32:30.081342 5010 generic.go:334] "Generic (PLEG): container finished" podID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerID="bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a" exitCode=0 Nov 26 16:32:30 crc kubenswrapper[5010]: I1126 16:32:30.081394 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wk7dd" event={"ID":"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6","Type":"ContainerDied","Data":"bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a"} Nov 26 16:32:30 crc kubenswrapper[5010]: I1126 16:32:30.081464 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wk7dd" event={"ID":"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6","Type":"ContainerStarted","Data":"88aff72cca182392d60bd5e9765ce4367c055a7c523b849027757b69c783a145"} Nov 26 16:32:32 crc kubenswrapper[5010]: I1126 16:32:32.102096 5010 generic.go:334] "Generic (PLEG): container finished" podID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerID="82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a" exitCode=0 Nov 26 16:32:32 crc kubenswrapper[5010]: I1126 16:32:32.102191 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wk7dd" event={"ID":"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6","Type":"ContainerDied","Data":"82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a"} Nov 26 16:32:34 crc kubenswrapper[5010]: I1126 16:32:34.137240 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wk7dd" event={"ID":"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6","Type":"ContainerStarted","Data":"33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c"} Nov 26 16:32:34 crc kubenswrapper[5010]: I1126 16:32:34.168230 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wk7dd" podStartSLOduration=3.161976179 podStartE2EDuration="6.168192364s" podCreationTimestamp="2025-11-26 16:32:28 
+0000 UTC" firstStartedPulling="2025-11-26 16:32:30.082686408 +0000 UTC m=+3970.873403556" lastFinishedPulling="2025-11-26 16:32:33.088902553 +0000 UTC m=+3973.879619741" observedRunningTime="2025-11-26 16:32:34.163613121 +0000 UTC m=+3974.954330279" watchObservedRunningTime="2025-11-26 16:32:34.168192364 +0000 UTC m=+3974.958909512" Nov 26 16:32:39 crc kubenswrapper[5010]: I1126 16:32:39.164664 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:39 crc kubenswrapper[5010]: I1126 16:32:39.165587 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:40 crc kubenswrapper[5010]: I1126 16:32:40.242507 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wk7dd" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="registry-server" probeResult="failure" output=< Nov 26 16:32:40 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 16:32:40 crc kubenswrapper[5010]: > Nov 26 16:32:49 crc kubenswrapper[5010]: I1126 16:32:49.248676 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:49 crc kubenswrapper[5010]: I1126 16:32:49.325052 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:49 crc kubenswrapper[5010]: I1126 16:32:49.501312 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wk7dd"] Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.296996 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wk7dd" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="registry-server" containerID="cri-o://33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c" gracePeriod=2 Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.702243 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.852675 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-catalog-content\") pod \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.852801 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-utilities\") pod \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.852840 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w947g\" (UniqueName: \"kubernetes.io/projected/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-kube-api-access-w947g\") pod \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\" (UID: \"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6\") " Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.854696 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-utilities" (OuterVolumeSpecName: "utilities") pod "5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" (UID: "5cc5cd3b-a32d-4870-8dcb-ba6489d910b6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.862349 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-kube-api-access-w947g" (OuterVolumeSpecName: "kube-api-access-w947g") pod "5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" (UID: "5cc5cd3b-a32d-4870-8dcb-ba6489d910b6"). InnerVolumeSpecName "kube-api-access-w947g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.941978 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" (UID: "5cc5cd3b-a32d-4870-8dcb-ba6489d910b6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.955262 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.955308 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:32:50 crc kubenswrapper[5010]: I1126 16:32:50.955318 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w947g\" (UniqueName: \"kubernetes.io/projected/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6-kube-api-access-w947g\") on node \"crc\" DevicePath \"\"" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.307306 5010 generic.go:334] "Generic (PLEG): container finished" podID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerID="33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c" exitCode=0 Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.307371 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wk7dd" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.307373 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wk7dd" event={"ID":"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6","Type":"ContainerDied","Data":"33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c"} Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.307547 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wk7dd" event={"ID":"5cc5cd3b-a32d-4870-8dcb-ba6489d910b6","Type":"ContainerDied","Data":"88aff72cca182392d60bd5e9765ce4367c055a7c523b849027757b69c783a145"} Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.307577 5010 scope.go:117] "RemoveContainer" containerID="33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.340454 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wk7dd"] Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.340612 5010 scope.go:117] "RemoveContainer" containerID="82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.347327 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wk7dd"] Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.424363 5010 scope.go:117] "RemoveContainer" containerID="bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.445469 5010 scope.go:117] "RemoveContainer" containerID="33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c" Nov 26 16:32:51 crc kubenswrapper[5010]: E1126 16:32:51.445965 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c\": container with ID starting with 33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c not found: ID does not exist" containerID="33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.446070 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c"} err="failed to get container status \"33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c\": rpc error: code = NotFound desc = could not find container \"33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c\": container with ID starting with 33240457e80d4d70e3e3a36b2b1c0b757c87b76e062f2b55cc1aed0a5b65fe1c not found: ID does not exist" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.446104 5010 scope.go:117] "RemoveContainer" containerID="82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a" Nov 26 16:32:51 crc kubenswrapper[5010]: E1126 16:32:51.446353 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a\": container with ID starting with 82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a not found: ID does not exist" containerID="82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.446387 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a"} err="failed to get container status \"82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a\": rpc error: code = NotFound desc = could not find container \"82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a\": container with ID starting with 82ca679a7316a647c7b9371d11aef2958196f58edb87c416b8d4f5be378c988a not found: ID does not exist" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.446405 5010 scope.go:117] "RemoveContainer" containerID="bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a" Nov 26 16:32:51 crc kubenswrapper[5010]: E1126 16:32:51.446647 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a\": container with ID starting with bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a not found: ID does not exist" containerID="bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.446675 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a"} err="failed to get container status \"bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a\": rpc error: code = NotFound desc = could not find container \"bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a\": container with ID starting with bf7206893a3b6f34129a3fc61b65018d18e29a25d22d3ed320c73019d43e6b5a not found: ID does not exist" Nov 26 16:32:51 crc kubenswrapper[5010]: I1126 16:32:51.909795 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" path="/var/lib/kubelet/pods/5cc5cd3b-a32d-4870-8dcb-ba6489d910b6/volumes" Nov 26 16:33:41 crc kubenswrapper[5010]: I1126 16:33:41.422332 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:33:41 crc kubenswrapper[5010]: I1126 16:33:41.423162 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:34:11 crc kubenswrapper[5010]: I1126 16:34:11.422516 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:34:11 crc kubenswrapper[5010]: I1126 16:34:11.424869 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:34:41 crc kubenswrapper[5010]: I1126 16:34:41.422705 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:34:41 crc kubenswrapper[5010]: I1126 16:34:41.423855 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:34:41 crc kubenswrapper[5010]: I1126 16:34:41.423986 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:34:41 crc kubenswrapper[5010]: I1126 16:34:41.425812 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ceac9ea4c67a4d52c2e36ff86bd795f4bcf9d6de60b38bef3ccbb46004f4a814"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:34:41 crc kubenswrapper[5010]: I1126 16:34:41.425906 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://ceac9ea4c67a4d52c2e36ff86bd795f4bcf9d6de60b38bef3ccbb46004f4a814" gracePeriod=600 Nov 26 16:34:42 crc kubenswrapper[5010]: I1126 16:34:42.443655 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="ceac9ea4c67a4d52c2e36ff86bd795f4bcf9d6de60b38bef3ccbb46004f4a814" exitCode=0 Nov 26 16:34:42 crc kubenswrapper[5010]: I1126 16:34:42.443777 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"ceac9ea4c67a4d52c2e36ff86bd795f4bcf9d6de60b38bef3ccbb46004f4a814"} Nov 26 16:34:42 crc kubenswrapper[5010]: I1126 16:34:42.444557 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff"} Nov 26 16:34:42 crc kubenswrapper[5010]: I1126 16:34:42.444597 5010 scope.go:117] "RemoveContainer" containerID="4f0a5ff5c47e173aa41d8af945507e6da82dcac35cfae3de7aa8956d931edb82" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.042662 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lpft4"] Nov 26 16:35:46 crc kubenswrapper[5010]: E1126 16:35:46.044097 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="extract-utilities" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.044125 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="extract-utilities" Nov 26 16:35:46 crc kubenswrapper[5010]: E1126 16:35:46.044153 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="extract-content" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.044165 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="extract-content" Nov 26 16:35:46 crc kubenswrapper[5010]: E1126 16:35:46.044206 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="registry-server" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.044219 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="registry-server" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.044487 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cc5cd3b-a32d-4870-8dcb-ba6489d910b6" containerName="registry-server" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.046372 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.062352 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpft4"] Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.102524 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-catalog-content\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.103005 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-utilities\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.103044 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf54r\" (UniqueName: \"kubernetes.io/projected/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-kube-api-access-kf54r\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.204235 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-catalog-content\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.204584 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-utilities\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.204727 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf54r\" (UniqueName: \"kubernetes.io/projected/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-kube-api-access-kf54r\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.204990 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-catalog-content\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.205083 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-utilities\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.225609 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-kf54r\" (UniqueName: \"kubernetes.io/projected/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-kube-api-access-kf54r\") pod \"redhat-marketplace-lpft4\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.380479 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:46 crc kubenswrapper[5010]: I1126 16:35:46.885069 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpft4"] Nov 26 16:35:47 crc kubenswrapper[5010]: I1126 16:35:47.070037 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerStarted","Data":"98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67"} Nov 26 16:35:47 crc kubenswrapper[5010]: I1126 16:35:47.070463 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerStarted","Data":"6b40ceb5144463656f764d126fc7b4f2710630b19c4be40a2c8b2b6014a4d55a"} Nov 26 16:35:47 crc kubenswrapper[5010]: I1126 16:35:47.072267 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:35:48 crc kubenswrapper[5010]: I1126 16:35:48.081789 5010 generic.go:334] "Generic (PLEG): container finished" podID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerID="98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67" exitCode=0 Nov 26 16:35:48 crc kubenswrapper[5010]: I1126 16:35:48.081925 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerDied","Data":"98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67"} Nov 26 16:35:51 crc kubenswrapper[5010]: I1126 16:35:51.111180 5010 generic.go:334] "Generic (PLEG): container finished" podID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerID="437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87" exitCode=0 Nov 26 16:35:51 crc kubenswrapper[5010]: I1126 16:35:51.111354 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerDied","Data":"437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87"} Nov 26 16:35:52 crc kubenswrapper[5010]: I1126 16:35:52.125049 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerStarted","Data":"49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4"} Nov 26 16:35:52 crc kubenswrapper[5010]: I1126 16:35:52.149354 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lpft4" podStartSLOduration=1.640137369 podStartE2EDuration="6.149335523s" podCreationTimestamp="2025-11-26 16:35:46 +0000 UTC" firstStartedPulling="2025-11-26 16:35:47.071781163 +0000 UTC m=+4167.862498321" lastFinishedPulling="2025-11-26 16:35:51.580979287 +0000 UTC m=+4172.371696475" observedRunningTime="2025-11-26 16:35:52.147459266 +0000 UTC m=+4172.938176444" watchObservedRunningTime="2025-11-26 16:35:52.149335523 +0000 UTC 
m=+4172.940052671" Nov 26 16:35:56 crc kubenswrapper[5010]: I1126 16:35:56.380761 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:56 crc kubenswrapper[5010]: I1126 16:35:56.381628 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:56 crc kubenswrapper[5010]: I1126 16:35:56.444204 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:57 crc kubenswrapper[5010]: I1126 16:35:57.244516 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:57 crc kubenswrapper[5010]: I1126 16:35:57.313685 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpft4"] Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.190544 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lpft4" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="registry-server" containerID="cri-o://49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4" gracePeriod=2 Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.649765 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.719946 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-utilities\") pod \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.719987 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-catalog-content\") pod \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.720027 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kf54r\" (UniqueName: \"kubernetes.io/projected/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-kube-api-access-kf54r\") pod \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\" (UID: \"7446dba9-b27d-4e0e-84d2-dd694d7b2a05\") " Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.720815 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-utilities" (OuterVolumeSpecName: "utilities") pod "7446dba9-b27d-4e0e-84d2-dd694d7b2a05" (UID: "7446dba9-b27d-4e0e-84d2-dd694d7b2a05"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.725038 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-kube-api-access-kf54r" (OuterVolumeSpecName: "kube-api-access-kf54r") pod "7446dba9-b27d-4e0e-84d2-dd694d7b2a05" (UID: "7446dba9-b27d-4e0e-84d2-dd694d7b2a05"). InnerVolumeSpecName "kube-api-access-kf54r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.738155 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7446dba9-b27d-4e0e-84d2-dd694d7b2a05" (UID: "7446dba9-b27d-4e0e-84d2-dd694d7b2a05"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.821533 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.821559 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:35:59 crc kubenswrapper[5010]: I1126 16:35:59.821570 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kf54r\" (UniqueName: \"kubernetes.io/projected/7446dba9-b27d-4e0e-84d2-dd694d7b2a05-kube-api-access-kf54r\") on node \"crc\" DevicePath \"\"" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.203975 5010 generic.go:334] "Generic (PLEG): container finished" podID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerID="49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4" exitCode=0 Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.204016 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerDied","Data":"49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4"} Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.204042 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpft4" event={"ID":"7446dba9-b27d-4e0e-84d2-dd694d7b2a05","Type":"ContainerDied","Data":"6b40ceb5144463656f764d126fc7b4f2710630b19c4be40a2c8b2b6014a4d55a"} Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.204058 5010 scope.go:117] "RemoveContainer" containerID="49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.204104 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpft4" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.237241 5010 scope.go:117] "RemoveContainer" containerID="437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.245017 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpft4"] Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.253983 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpft4"] Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.258113 5010 scope.go:117] "RemoveContainer" containerID="98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.284263 5010 scope.go:117] "RemoveContainer" containerID="49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4" Nov 26 16:36:00 crc kubenswrapper[5010]: E1126 16:36:00.285239 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4\": container with ID starting with 49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4 not found: ID does not exist" containerID="49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.285291 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4"} err="failed to get container status \"49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4\": rpc error: code = NotFound desc = could not find container \"49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4\": container with ID starting with 49b2801b97cb19857dea438df31f50daa84fe1b4547c4fa800a8b41979d4c5e4 not found: ID does not exist" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.285321 5010 scope.go:117] "RemoveContainer" containerID="437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87" Nov 26 16:36:00 crc kubenswrapper[5010]: E1126 16:36:00.286373 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87\": container with ID starting with 437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87 not found: ID does not exist" containerID="437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.286542 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87"} err="failed to get container status \"437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87\": rpc error: code = NotFound desc = could not find container \"437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87\": container with ID starting with 437db8e9ab874808a0888769754b1406bfc7f6fdce7ad64fe5efb5b2d3bccc87 not found: ID does not exist" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.286578 5010 scope.go:117] "RemoveContainer" containerID="98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67" Nov 26 16:36:00 crc kubenswrapper[5010]: E1126 16:36:00.287156 5010 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67\": container with ID starting with 98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67 not found: ID does not exist" containerID="98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67" Nov 26 16:36:00 crc kubenswrapper[5010]: I1126 16:36:00.287220 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67"} err="failed to get container status \"98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67\": rpc error: code = NotFound desc = could not find container \"98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67\": container with ID starting with 98cdbe347296ef276cef7419063a760d39673149c9e609c47f5adbed6af97c67 not found: ID does not exist" Nov 26 16:36:01 crc kubenswrapper[5010]: I1126 16:36:01.908120 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" path="/var/lib/kubelet/pods/7446dba9-b27d-4e0e-84d2-dd694d7b2a05/volumes" Nov 26 16:36:41 crc kubenswrapper[5010]: I1126 16:36:41.422949 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:36:41 crc kubenswrapper[5010]: I1126 16:36:41.423460 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.872525 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p2bz5"] Nov 26 16:36:55 crc kubenswrapper[5010]: E1126 16:36:55.873272 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="registry-server" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.873284 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="registry-server" Nov 26 16:36:55 crc kubenswrapper[5010]: E1126 16:36:55.873305 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="extract-utilities" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.873311 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="extract-utilities" Nov 26 16:36:55 crc kubenswrapper[5010]: E1126 16:36:55.873328 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="extract-content" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.873334 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="extract-content" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.873475 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7446dba9-b27d-4e0e-84d2-dd694d7b2a05" containerName="registry-server" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 
16:36:55.875378 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.887086 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p2bz5"] Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.930165 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-catalog-content\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.930700 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxkvk\" (UniqueName: \"kubernetes.io/projected/3e27a4ef-3593-42d8-9767-ffc73c253ca6-kube-api-access-cxkvk\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:55 crc kubenswrapper[5010]: I1126 16:36:55.930958 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-utilities\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.033347 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxkvk\" (UniqueName: \"kubernetes.io/projected/3e27a4ef-3593-42d8-9767-ffc73c253ca6-kube-api-access-cxkvk\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.033409 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-utilities\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.033444 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-catalog-content\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.034006 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-catalog-content\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.034033 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-utilities\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc 
kubenswrapper[5010]: I1126 16:36:56.061052 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxkvk\" (UniqueName: \"kubernetes.io/projected/3e27a4ef-3593-42d8-9767-ffc73c253ca6-kube-api-access-cxkvk\") pod \"certified-operators-p2bz5\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.262814 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.766610 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p2bz5"] Nov 26 16:36:56 crc kubenswrapper[5010]: W1126 16:36:56.781854 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e27a4ef_3593_42d8_9767_ffc73c253ca6.slice/crio-ab1a9f9d3944937fdaa896a00c4661b07e44df043807a2e6f3b28b6a4c8a033a WatchSource:0}: Error finding container ab1a9f9d3944937fdaa896a00c4661b07e44df043807a2e6f3b28b6a4c8a033a: Status 404 returned error can't find the container with id ab1a9f9d3944937fdaa896a00c4661b07e44df043807a2e6f3b28b6a4c8a033a Nov 26 16:36:56 crc kubenswrapper[5010]: I1126 16:36:56.798758 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerStarted","Data":"ab1a9f9d3944937fdaa896a00c4661b07e44df043807a2e6f3b28b6a4c8a033a"} Nov 26 16:36:57 crc kubenswrapper[5010]: I1126 16:36:57.811853 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerID="85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934" exitCode=0 Nov 26 16:36:57 crc kubenswrapper[5010]: I1126 16:36:57.811926 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerDied","Data":"85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934"} Nov 26 16:36:58 crc kubenswrapper[5010]: I1126 16:36:58.826211 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerStarted","Data":"10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3"} Nov 26 16:36:59 crc kubenswrapper[5010]: I1126 16:36:59.842412 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerID="10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3" exitCode=0 Nov 26 16:36:59 crc kubenswrapper[5010]: I1126 16:36:59.842474 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerDied","Data":"10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3"} Nov 26 16:37:00 crc kubenswrapper[5010]: I1126 16:37:00.856807 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerStarted","Data":"ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e"} Nov 26 16:37:00 crc kubenswrapper[5010]: I1126 16:37:00.908281 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-p2bz5" podStartSLOduration=3.18448775 podStartE2EDuration="5.908257599s" podCreationTimestamp="2025-11-26 16:36:55 +0000 UTC" firstStartedPulling="2025-11-26 16:36:57.814540741 +0000 UTC m=+4238.605257929" lastFinishedPulling="2025-11-26 16:37:00.53831059 +0000 UTC m=+4241.329027778" observedRunningTime="2025-11-26 16:37:00.902894707 +0000 UTC m=+4241.693611885" watchObservedRunningTime="2025-11-26 16:37:00.908257599 +0000 UTC m=+4241.698974767" Nov 26 16:37:06 crc kubenswrapper[5010]: I1126 16:37:06.263186 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:37:06 crc kubenswrapper[5010]: I1126 16:37:06.263941 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:37:06 crc kubenswrapper[5010]: I1126 16:37:06.344549 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:37:06 crc kubenswrapper[5010]: I1126 16:37:06.990779 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:37:07 crc kubenswrapper[5010]: I1126 16:37:07.057801 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p2bz5"] Nov 26 16:37:08 crc kubenswrapper[5010]: I1126 16:37:08.931596 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p2bz5" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="registry-server" containerID="cri-o://ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e" gracePeriod=2 Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.458400 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.565143 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-utilities\") pod \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.565277 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-catalog-content\") pod \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.565303 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxkvk\" (UniqueName: \"kubernetes.io/projected/3e27a4ef-3593-42d8-9767-ffc73c253ca6-kube-api-access-cxkvk\") pod \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\" (UID: \"3e27a4ef-3593-42d8-9767-ffc73c253ca6\") " Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.566694 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-utilities" (OuterVolumeSpecName: "utilities") pod "3e27a4ef-3593-42d8-9767-ffc73c253ca6" (UID: "3e27a4ef-3593-42d8-9767-ffc73c253ca6"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.571985 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e27a4ef-3593-42d8-9767-ffc73c253ca6-kube-api-access-cxkvk" (OuterVolumeSpecName: "kube-api-access-cxkvk") pod "3e27a4ef-3593-42d8-9767-ffc73c253ca6" (UID: "3e27a4ef-3593-42d8-9767-ffc73c253ca6"). InnerVolumeSpecName "kube-api-access-cxkvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.666673 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxkvk\" (UniqueName: \"kubernetes.io/projected/3e27a4ef-3593-42d8-9767-ffc73c253ca6-kube-api-access-cxkvk\") on node \"crc\" DevicePath \"\"" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.666748 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.911090 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e27a4ef-3593-42d8-9767-ffc73c253ca6" (UID: "3e27a4ef-3593-42d8-9767-ffc73c253ca6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.947421 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerID="ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e" exitCode=0 Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.947471 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerDied","Data":"ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e"} Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.947509 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p2bz5" event={"ID":"3e27a4ef-3593-42d8-9767-ffc73c253ca6","Type":"ContainerDied","Data":"ab1a9f9d3944937fdaa896a00c4661b07e44df043807a2e6f3b28b6a4c8a033a"} Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.947532 5010 scope.go:117] "RemoveContainer" containerID="ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.947563 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p2bz5" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.971803 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e27a4ef-3593-42d8-9767-ffc73c253ca6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:37:09 crc kubenswrapper[5010]: I1126 16:37:09.981813 5010 scope.go:117] "RemoveContainer" containerID="10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3" Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.018517 5010 scope.go:117] "RemoveContainer" containerID="85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934" Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.022943 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p2bz5"] Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.032194 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p2bz5"] Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.046119 5010 scope.go:117] "RemoveContainer" containerID="ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e" Nov 26 16:37:10 crc kubenswrapper[5010]: E1126 16:37:10.047342 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e\": container with ID starting with ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e not found: ID does not exist" containerID="ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e" Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.047432 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e"} err="failed to get container status \"ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e\": rpc error: code = NotFound desc = could not find container \"ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e\": container with ID starting with ea38a093997d32d5c90a55688b0556a827a45cfaf16c943dbbcdcc29fca1563e not found: ID does not exist" Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.047496 5010 scope.go:117] "RemoveContainer" containerID="10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3" Nov 26 16:37:10 crc kubenswrapper[5010]: E1126 16:37:10.047994 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3\": container with ID starting with 10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3 not found: ID does not exist" containerID="10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3" Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.048036 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3"} err="failed to get container status \"10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3\": rpc error: code = NotFound desc = could not find container \"10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3\": container with ID starting with 10df2230d2849c1be07314f881c4fe696816ce79662524d855d44258fa14d8c3 not found: ID does not exist" Nov 26 16:37:10 crc 
kubenswrapper[5010]: I1126 16:37:10.048067 5010 scope.go:117] "RemoveContainer" containerID="85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934" Nov 26 16:37:10 crc kubenswrapper[5010]: E1126 16:37:10.048579 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934\": container with ID starting with 85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934 not found: ID does not exist" containerID="85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934" Nov 26 16:37:10 crc kubenswrapper[5010]: I1126 16:37:10.048622 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934"} err="failed to get container status \"85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934\": rpc error: code = NotFound desc = could not find container \"85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934\": container with ID starting with 85da5e95fb4c06ac93771d40bff646d2ab52fadf1a53f28579283408b202b934 not found: ID does not exist" Nov 26 16:37:11 crc kubenswrapper[5010]: I1126 16:37:11.423040 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:37:11 crc kubenswrapper[5010]: I1126 16:37:11.423476 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:37:11 crc kubenswrapper[5010]: I1126 16:37:11.910569 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" path="/var/lib/kubelet/pods/3e27a4ef-3593-42d8-9767-ffc73c253ca6/volumes" Nov 26 16:37:41 crc kubenswrapper[5010]: I1126 16:37:41.422841 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:37:41 crc kubenswrapper[5010]: I1126 16:37:41.423661 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:37:41 crc kubenswrapper[5010]: I1126 16:37:41.423778 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:37:41 crc kubenswrapper[5010]: I1126 16:37:41.425109 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container 
machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:37:41 crc kubenswrapper[5010]: I1126 16:37:41.425242 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" gracePeriod=600 Nov 26 16:37:41 crc kubenswrapper[5010]: E1126 16:37:41.578464 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:37:42 crc kubenswrapper[5010]: I1126 16:37:42.277680 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" exitCode=0 Nov 26 16:37:42 crc kubenswrapper[5010]: I1126 16:37:42.277771 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff"} Nov 26 16:37:42 crc kubenswrapper[5010]: I1126 16:37:42.277849 5010 scope.go:117] "RemoveContainer" containerID="ceac9ea4c67a4d52c2e36ff86bd795f4bcf9d6de60b38bef3ccbb46004f4a814" Nov 26 16:37:42 crc kubenswrapper[5010]: I1126 16:37:42.278637 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:37:42 crc kubenswrapper[5010]: E1126 16:37:42.279364 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:37:54 crc kubenswrapper[5010]: I1126 16:37:54.892239 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:37:54 crc kubenswrapper[5010]: E1126 16:37:54.893362 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.612518 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qxzc5"] Nov 26 16:37:59 crc kubenswrapper[5010]: E1126 16:37:59.613445 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="extract-content" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.613461 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="extract-content" Nov 26 16:37:59 crc kubenswrapper[5010]: E1126 16:37:59.613492 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="registry-server" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.613498 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="registry-server" Nov 26 16:37:59 crc kubenswrapper[5010]: E1126 16:37:59.613508 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="extract-utilities" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.613517 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="extract-utilities" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.613648 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e27a4ef-3593-42d8-9767-ffc73c253ca6" containerName="registry-server" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.614756 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.625499 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxzc5"] Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.735406 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-725lr\" (UniqueName: \"kubernetes.io/projected/5206a32b-db27-44ac-b789-ec7a63789f1a-kube-api-access-725lr\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.735559 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-catalog-content\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.735769 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-utilities\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.837702 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-utilities\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.838270 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-utilities\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.838323 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-725lr\" (UniqueName: \"kubernetes.io/projected/5206a32b-db27-44ac-b789-ec7a63789f1a-kube-api-access-725lr\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.838404 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-catalog-content\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.838743 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-catalog-content\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.867306 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-725lr\" (UniqueName: \"kubernetes.io/projected/5206a32b-db27-44ac-b789-ec7a63789f1a-kube-api-access-725lr\") pod \"community-operators-qxzc5\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:37:59 crc kubenswrapper[5010]: I1126 16:37:59.946620 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:00 crc kubenswrapper[5010]: I1126 16:38:00.486399 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qxzc5"] Nov 26 16:38:01 crc kubenswrapper[5010]: I1126 16:38:01.474228 5010 generic.go:334] "Generic (PLEG): container finished" podID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerID="5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d" exitCode=0 Nov 26 16:38:01 crc kubenswrapper[5010]: I1126 16:38:01.474303 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxzc5" event={"ID":"5206a32b-db27-44ac-b789-ec7a63789f1a","Type":"ContainerDied","Data":"5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d"} Nov 26 16:38:01 crc kubenswrapper[5010]: I1126 16:38:01.474572 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxzc5" event={"ID":"5206a32b-db27-44ac-b789-ec7a63789f1a","Type":"ContainerStarted","Data":"1e1a7a1fb7ba7f0de91eea6c47446465d383f26245e4ae7ca871bf3d99e8ca3d"} Nov 26 16:38:03 crc kubenswrapper[5010]: I1126 16:38:03.498005 5010 generic.go:334] "Generic (PLEG): container finished" podID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerID="21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787" exitCode=0 Nov 26 16:38:03 crc kubenswrapper[5010]: I1126 16:38:03.498067 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxzc5" event={"ID":"5206a32b-db27-44ac-b789-ec7a63789f1a","Type":"ContainerDied","Data":"21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787"} Nov 26 16:38:04 crc kubenswrapper[5010]: I1126 16:38:04.508268 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxzc5" 
event={"ID":"5206a32b-db27-44ac-b789-ec7a63789f1a","Type":"ContainerStarted","Data":"1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004"} Nov 26 16:38:04 crc kubenswrapper[5010]: I1126 16:38:04.539139 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qxzc5" podStartSLOduration=3.058787831 podStartE2EDuration="5.53910607s" podCreationTimestamp="2025-11-26 16:37:59 +0000 UTC" firstStartedPulling="2025-11-26 16:38:01.476386118 +0000 UTC m=+4302.267103266" lastFinishedPulling="2025-11-26 16:38:03.956704317 +0000 UTC m=+4304.747421505" observedRunningTime="2025-11-26 16:38:04.5277815 +0000 UTC m=+4305.318498728" watchObservedRunningTime="2025-11-26 16:38:04.53910607 +0000 UTC m=+4305.329823258" Nov 26 16:38:07 crc kubenswrapper[5010]: I1126 16:38:07.891832 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:38:07 crc kubenswrapper[5010]: E1126 16:38:07.892745 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:38:09 crc kubenswrapper[5010]: I1126 16:38:09.947088 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:09 crc kubenswrapper[5010]: I1126 16:38:09.947201 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:10 crc kubenswrapper[5010]: I1126 16:38:10.025682 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:10 crc kubenswrapper[5010]: I1126 16:38:10.651189 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:10 crc kubenswrapper[5010]: I1126 16:38:10.717805 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxzc5"] Nov 26 16:38:12 crc kubenswrapper[5010]: I1126 16:38:12.583097 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qxzc5" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="registry-server" containerID="cri-o://1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004" gracePeriod=2 Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.054686 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.170021 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-725lr\" (UniqueName: \"kubernetes.io/projected/5206a32b-db27-44ac-b789-ec7a63789f1a-kube-api-access-725lr\") pod \"5206a32b-db27-44ac-b789-ec7a63789f1a\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.170123 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-catalog-content\") pod \"5206a32b-db27-44ac-b789-ec7a63789f1a\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.170156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-utilities\") pod \"5206a32b-db27-44ac-b789-ec7a63789f1a\" (UID: \"5206a32b-db27-44ac-b789-ec7a63789f1a\") " Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.172391 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-utilities" (OuterVolumeSpecName: "utilities") pod "5206a32b-db27-44ac-b789-ec7a63789f1a" (UID: "5206a32b-db27-44ac-b789-ec7a63789f1a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.181975 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5206a32b-db27-44ac-b789-ec7a63789f1a-kube-api-access-725lr" (OuterVolumeSpecName: "kube-api-access-725lr") pod "5206a32b-db27-44ac-b789-ec7a63789f1a" (UID: "5206a32b-db27-44ac-b789-ec7a63789f1a"). InnerVolumeSpecName "kube-api-access-725lr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.274104 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.274143 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-725lr\" (UniqueName: \"kubernetes.io/projected/5206a32b-db27-44ac-b789-ec7a63789f1a-kube-api-access-725lr\") on node \"crc\" DevicePath \"\"" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.357130 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5206a32b-db27-44ac-b789-ec7a63789f1a" (UID: "5206a32b-db27-44ac-b789-ec7a63789f1a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.375467 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5206a32b-db27-44ac-b789-ec7a63789f1a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.594955 5010 generic.go:334] "Generic (PLEG): container finished" podID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerID="1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004" exitCode=0 Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.595018 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qxzc5" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.595024 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxzc5" event={"ID":"5206a32b-db27-44ac-b789-ec7a63789f1a","Type":"ContainerDied","Data":"1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004"} Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.595077 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qxzc5" event={"ID":"5206a32b-db27-44ac-b789-ec7a63789f1a","Type":"ContainerDied","Data":"1e1a7a1fb7ba7f0de91eea6c47446465d383f26245e4ae7ca871bf3d99e8ca3d"} Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.595109 5010 scope.go:117] "RemoveContainer" containerID="1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.636337 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qxzc5"] Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.638522 5010 scope.go:117] "RemoveContainer" containerID="21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.648908 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qxzc5"] Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.668579 5010 scope.go:117] "RemoveContainer" containerID="5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.698286 5010 scope.go:117] "RemoveContainer" containerID="1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004" Nov 26 16:38:13 crc kubenswrapper[5010]: E1126 16:38:13.699012 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004\": container with ID starting with 1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004 not found: ID does not exist" containerID="1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.699054 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004"} err="failed to get container status \"1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004\": rpc error: code = NotFound desc = could not find container \"1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004\": container with ID starting with 1ec316a6cd11d5ce283cc4569177392827919c2c5a3fd52022f8aa54a613b004 not found: ID does not exist" Nov 26 
16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.699081 5010 scope.go:117] "RemoveContainer" containerID="21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787" Nov 26 16:38:13 crc kubenswrapper[5010]: E1126 16:38:13.699412 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787\": container with ID starting with 21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787 not found: ID does not exist" containerID="21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.699565 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787"} err="failed to get container status \"21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787\": rpc error: code = NotFound desc = could not find container \"21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787\": container with ID starting with 21bc875ab6003b344243e678064b2baaef3604f2c364043be803de915060f787 not found: ID does not exist" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.699684 5010 scope.go:117] "RemoveContainer" containerID="5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d" Nov 26 16:38:13 crc kubenswrapper[5010]: E1126 16:38:13.700511 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d\": container with ID starting with 5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d not found: ID does not exist" containerID="5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.700552 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d"} err="failed to get container status \"5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d\": rpc error: code = NotFound desc = could not find container \"5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d\": container with ID starting with 5aea7a9e1316ecafe15ed1fafdf23c0a2c04ab71ee4e1f98d269252d814ff44d not found: ID does not exist" Nov 26 16:38:13 crc kubenswrapper[5010]: I1126 16:38:13.906451 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" path="/var/lib/kubelet/pods/5206a32b-db27-44ac-b789-ec7a63789f1a/volumes" Nov 26 16:38:20 crc kubenswrapper[5010]: I1126 16:38:20.891547 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:38:20 crc kubenswrapper[5010]: E1126 16:38:20.892757 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:38:32 crc kubenswrapper[5010]: I1126 16:38:32.892128 5010 scope.go:117] "RemoveContainer" 
containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:38:32 crc kubenswrapper[5010]: E1126 16:38:32.892760 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:38:43 crc kubenswrapper[5010]: I1126 16:38:43.893889 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:38:43 crc kubenswrapper[5010]: E1126 16:38:43.894608 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:38:54 crc kubenswrapper[5010]: I1126 16:38:54.892196 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:38:54 crc kubenswrapper[5010]: E1126 16:38:54.893528 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:39:05 crc kubenswrapper[5010]: I1126 16:39:05.891593 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:39:05 crc kubenswrapper[5010]: E1126 16:39:05.893269 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:39:20 crc kubenswrapper[5010]: I1126 16:39:20.892068 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:39:20 crc kubenswrapper[5010]: E1126 16:39:20.893103 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:39:33 crc kubenswrapper[5010]: I1126 16:39:33.891959 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:39:33 crc kubenswrapper[5010]: E1126 16:39:33.892954 5010 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:39:47 crc kubenswrapper[5010]: I1126 16:39:47.892359 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:39:47 crc kubenswrapper[5010]: E1126 16:39:47.893238 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:39:58 crc kubenswrapper[5010]: I1126 16:39:58.892495 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:39:58 crc kubenswrapper[5010]: E1126 16:39:58.893884 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:40:11 crc kubenswrapper[5010]: I1126 16:40:11.892299 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:40:11 crc kubenswrapper[5010]: E1126 16:40:11.893275 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:40:26 crc kubenswrapper[5010]: I1126 16:40:26.892396 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:40:26 crc kubenswrapper[5010]: E1126 16:40:26.893464 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:40:38 crc kubenswrapper[5010]: I1126 16:40:38.891577 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:40:38 crc kubenswrapper[5010]: E1126 16:40:38.892513 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:40:53 crc kubenswrapper[5010]: I1126 16:40:53.892416 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:40:53 crc kubenswrapper[5010]: E1126 16:40:53.893699 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:41:07 crc kubenswrapper[5010]: I1126 16:41:07.891948 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:41:07 crc kubenswrapper[5010]: E1126 16:41:07.893145 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:41:18 crc kubenswrapper[5010]: I1126 16:41:18.891690 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:41:18 crc kubenswrapper[5010]: E1126 16:41:18.893172 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:41:32 crc kubenswrapper[5010]: I1126 16:41:32.892014 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:41:32 crc kubenswrapper[5010]: E1126 16:41:32.893171 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:41:45 crc kubenswrapper[5010]: I1126 16:41:45.892113 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:41:45 crc kubenswrapper[5010]: E1126 16:41:45.893183 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:42:00 crc kubenswrapper[5010]: I1126 16:42:00.891403 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:42:00 crc kubenswrapper[5010]: E1126 16:42:00.893500 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:42:12 crc kubenswrapper[5010]: I1126 16:42:12.891651 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:42:12 crc kubenswrapper[5010]: E1126 16:42:12.892876 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:42:26 crc kubenswrapper[5010]: I1126 16:42:26.891629 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:42:26 crc kubenswrapper[5010]: E1126 16:42:26.893693 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.309564 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-fvfr8"] Nov 26 16:42:40 crc kubenswrapper[5010]: E1126 16:42:40.310377 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="extract-utilities" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.310392 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="extract-utilities" Nov 26 16:42:40 crc kubenswrapper[5010]: E1126 16:42:40.310420 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="registry-server" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.310429 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="registry-server" Nov 26 16:42:40 crc kubenswrapper[5010]: E1126 16:42:40.310460 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="extract-content" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.310469 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="extract-content" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.310649 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="5206a32b-db27-44ac-b789-ec7a63789f1a" containerName="registry-server" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.311845 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.356468 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fvfr8"] Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.390069 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-utilities\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.390395 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-catalog-content\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.390515 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22fls\" (UniqueName: \"kubernetes.io/projected/e61d3e04-7c50-4aef-88d0-26e0b63b7661-kube-api-access-22fls\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.492227 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-utilities\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.492527 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-catalog-content\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.492618 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22fls\" (UniqueName: \"kubernetes.io/projected/e61d3e04-7c50-4aef-88d0-26e0b63b7661-kube-api-access-22fls\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.492952 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-utilities\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.493201 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-catalog-content\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " 
pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.524345 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22fls\" (UniqueName: \"kubernetes.io/projected/e61d3e04-7c50-4aef-88d0-26e0b63b7661-kube-api-access-22fls\") pod \"redhat-operators-fvfr8\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.633634 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:40 crc kubenswrapper[5010]: I1126 16:42:40.897034 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-fvfr8"] Nov 26 16:42:41 crc kubenswrapper[5010]: I1126 16:42:41.643699 5010 generic.go:334] "Generic (PLEG): container finished" podID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerID="98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226" exitCode=0 Nov 26 16:42:41 crc kubenswrapper[5010]: I1126 16:42:41.643766 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fvfr8" event={"ID":"e61d3e04-7c50-4aef-88d0-26e0b63b7661","Type":"ContainerDied","Data":"98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226"} Nov 26 16:42:41 crc kubenswrapper[5010]: I1126 16:42:41.643793 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fvfr8" event={"ID":"e61d3e04-7c50-4aef-88d0-26e0b63b7661","Type":"ContainerStarted","Data":"c1b3206ace1e44f50e5db905383fed14a053469b03fd3e44b3fc0f8de8807b16"} Nov 26 16:42:41 crc kubenswrapper[5010]: I1126 16:42:41.645781 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:42:41 crc kubenswrapper[5010]: I1126 16:42:41.892062 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:42:42 crc kubenswrapper[5010]: I1126 16:42:42.652601 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"569ad5c2cdd4f1854f3e605f7633f5753ed4e4e3de2ce020afe82ef2326c961c"} Nov 26 16:42:43 crc kubenswrapper[5010]: I1126 16:42:43.667548 5010 generic.go:334] "Generic (PLEG): container finished" podID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerID="1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df" exitCode=0 Nov 26 16:42:43 crc kubenswrapper[5010]: I1126 16:42:43.667840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fvfr8" event={"ID":"e61d3e04-7c50-4aef-88d0-26e0b63b7661","Type":"ContainerDied","Data":"1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df"} Nov 26 16:42:45 crc kubenswrapper[5010]: I1126 16:42:45.689077 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fvfr8" event={"ID":"e61d3e04-7c50-4aef-88d0-26e0b63b7661","Type":"ContainerStarted","Data":"2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0"} Nov 26 16:42:50 crc kubenswrapper[5010]: I1126 16:42:50.634336 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:50 crc kubenswrapper[5010]: I1126 16:42:50.634931 5010 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:50 crc kubenswrapper[5010]: I1126 16:42:50.699697 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:50 crc kubenswrapper[5010]: I1126 16:42:50.723484 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-fvfr8" podStartSLOduration=7.875191904 podStartE2EDuration="10.723460213s" podCreationTimestamp="2025-11-26 16:42:40 +0000 UTC" firstStartedPulling="2025-11-26 16:42:41.645549183 +0000 UTC m=+4582.436266331" lastFinishedPulling="2025-11-26 16:42:44.493817502 +0000 UTC m=+4585.284534640" observedRunningTime="2025-11-26 16:42:45.724600379 +0000 UTC m=+4586.515317567" watchObservedRunningTime="2025-11-26 16:42:50.723460213 +0000 UTC m=+4591.514177391" Nov 26 16:42:50 crc kubenswrapper[5010]: I1126 16:42:50.785816 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:50 crc kubenswrapper[5010]: I1126 16:42:50.937921 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fvfr8"] Nov 26 16:42:52 crc kubenswrapper[5010]: I1126 16:42:52.751655 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-fvfr8" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="registry-server" containerID="cri-o://2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0" gracePeriod=2 Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.227444 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.403965 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-utilities\") pod \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.404062 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-catalog-content\") pod \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.404251 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-22fls\" (UniqueName: \"kubernetes.io/projected/e61d3e04-7c50-4aef-88d0-26e0b63b7661-kube-api-access-22fls\") pod \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\" (UID: \"e61d3e04-7c50-4aef-88d0-26e0b63b7661\") " Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.405783 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-utilities" (OuterVolumeSpecName: "utilities") pod "e61d3e04-7c50-4aef-88d0-26e0b63b7661" (UID: "e61d3e04-7c50-4aef-88d0-26e0b63b7661"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.411956 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e61d3e04-7c50-4aef-88d0-26e0b63b7661-kube-api-access-22fls" (OuterVolumeSpecName: "kube-api-access-22fls") pod "e61d3e04-7c50-4aef-88d0-26e0b63b7661" (UID: "e61d3e04-7c50-4aef-88d0-26e0b63b7661"). InnerVolumeSpecName "kube-api-access-22fls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.506795 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.506846 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-22fls\" (UniqueName: \"kubernetes.io/projected/e61d3e04-7c50-4aef-88d0-26e0b63b7661-kube-api-access-22fls\") on node \"crc\" DevicePath \"\"" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.765067 5010 generic.go:334] "Generic (PLEG): container finished" podID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerID="2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0" exitCode=0 Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.765156 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fvfr8" event={"ID":"e61d3e04-7c50-4aef-88d0-26e0b63b7661","Type":"ContainerDied","Data":"2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0"} Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.765186 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-fvfr8" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.765230 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-fvfr8" event={"ID":"e61d3e04-7c50-4aef-88d0-26e0b63b7661","Type":"ContainerDied","Data":"c1b3206ace1e44f50e5db905383fed14a053469b03fd3e44b3fc0f8de8807b16"} Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.765268 5010 scope.go:117] "RemoveContainer" containerID="2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.798174 5010 scope.go:117] "RemoveContainer" containerID="1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.830059 5010 scope.go:117] "RemoveContainer" containerID="98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.881221 5010 scope.go:117] "RemoveContainer" containerID="2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0" Nov 26 16:42:53 crc kubenswrapper[5010]: E1126 16:42:53.881873 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0\": container with ID starting with 2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0 not found: ID does not exist" containerID="2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.881962 5010 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0"} err="failed to get container status \"2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0\": rpc error: code = NotFound desc = could not find container \"2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0\": container with ID starting with 2e5fcb19dda83326b5188c135997c7294d74a29dbb82a6060facc1853aa30aa0 not found: ID does not exist" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.882025 5010 scope.go:117] "RemoveContainer" containerID="1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df" Nov 26 16:42:53 crc kubenswrapper[5010]: E1126 16:42:53.882782 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df\": container with ID starting with 1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df not found: ID does not exist" containerID="1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.882872 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df"} err="failed to get container status \"1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df\": rpc error: code = NotFound desc = could not find container \"1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df\": container with ID starting with 1028d66ed1ec53c411576ecb47565699a6f26d7007de856a37aad68a701715df not found: ID does not exist" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.882910 5010 scope.go:117] "RemoveContainer" containerID="98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226" Nov 26 16:42:53 crc kubenswrapper[5010]: E1126 16:42:53.883413 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226\": container with ID starting with 98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226 not found: ID does not exist" containerID="98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226" Nov 26 16:42:53 crc kubenswrapper[5010]: I1126 16:42:53.883457 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226"} err="failed to get container status \"98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226\": rpc error: code = NotFound desc = could not find container \"98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226\": container with ID starting with 98fc52fd227f0bd77027384fd5d6039da6065fd3b55505626c760751d63ea226 not found: ID does not exist" Nov 26 16:42:54 crc kubenswrapper[5010]: I1126 16:42:54.849269 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e61d3e04-7c50-4aef-88d0-26e0b63b7661" (UID: "e61d3e04-7c50-4aef-88d0-26e0b63b7661"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:42:54 crc kubenswrapper[5010]: I1126 16:42:54.939671 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e61d3e04-7c50-4aef-88d0-26e0b63b7661-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:42:55 crc kubenswrapper[5010]: I1126 16:42:55.028455 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-fvfr8"] Nov 26 16:42:55 crc kubenswrapper[5010]: I1126 16:42:55.040455 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-fvfr8"] Nov 26 16:42:55 crc kubenswrapper[5010]: I1126 16:42:55.902230 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" path="/var/lib/kubelet/pods/e61d3e04-7c50-4aef-88d0-26e0b63b7661/volumes" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.165412 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj"] Nov 26 16:45:00 crc kubenswrapper[5010]: E1126 16:45:00.166479 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="extract-utilities" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.166509 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="extract-utilities" Nov 26 16:45:00 crc kubenswrapper[5010]: E1126 16:45:00.166561 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="registry-server" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.166574 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="registry-server" Nov 26 16:45:00 crc kubenswrapper[5010]: E1126 16:45:00.166595 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="extract-content" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.166606 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="extract-content" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.166914 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e61d3e04-7c50-4aef-88d0-26e0b63b7661" containerName="registry-server" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.167597 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.170835 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.170892 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.185240 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj"] Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.263651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gprzz\" (UniqueName: \"kubernetes.io/projected/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-kube-api-access-gprzz\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.263757 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-config-volume\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.263824 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-secret-volume\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.365551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gprzz\" (UniqueName: \"kubernetes.io/projected/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-kube-api-access-gprzz\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.365628 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-config-volume\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.365695 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-secret-volume\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.367122 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-config-volume\") pod 
\"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.377007 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-secret-volume\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.385165 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gprzz\" (UniqueName: \"kubernetes.io/projected/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-kube-api-access-gprzz\") pod \"collect-profiles-29402925-4qnzj\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.491373 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:00 crc kubenswrapper[5010]: I1126 16:45:00.708380 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj"] Nov 26 16:45:01 crc kubenswrapper[5010]: I1126 16:45:01.081828 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" event={"ID":"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe","Type":"ContainerStarted","Data":"13bae8e2b7d0ae7e48b49845a8a5ebc69ac26eeeabbe26448bb9cfce815219c1"} Nov 26 16:45:01 crc kubenswrapper[5010]: I1126 16:45:01.081881 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" event={"ID":"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe","Type":"ContainerStarted","Data":"d0947dde1d000e8e4d625b16322d6f473db07d0135b5e14926486773f5346f14"} Nov 26 16:45:01 crc kubenswrapper[5010]: I1126 16:45:01.104057 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" podStartSLOduration=1.10403973 podStartE2EDuration="1.10403973s" podCreationTimestamp="2025-11-26 16:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:45:01.101127108 +0000 UTC m=+4721.891844256" watchObservedRunningTime="2025-11-26 16:45:01.10403973 +0000 UTC m=+4721.894756878" Nov 26 16:45:02 crc kubenswrapper[5010]: I1126 16:45:02.094828 5010 generic.go:334] "Generic (PLEG): container finished" podID="1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" containerID="13bae8e2b7d0ae7e48b49845a8a5ebc69ac26eeeabbe26448bb9cfce815219c1" exitCode=0 Nov 26 16:45:02 crc kubenswrapper[5010]: I1126 16:45:02.094883 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" event={"ID":"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe","Type":"ContainerDied","Data":"13bae8e2b7d0ae7e48b49845a8a5ebc69ac26eeeabbe26448bb9cfce815219c1"} Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.689109 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.822958 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gprzz\" (UniqueName: \"kubernetes.io/projected/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-kube-api-access-gprzz\") pod \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.823194 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-config-volume\") pod \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.823247 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-secret-volume\") pod \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\" (UID: \"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe\") " Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.824510 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-config-volume" (OuterVolumeSpecName: "config-volume") pod "1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" (UID: "1cb3e510-06f9-4bf3-9b5c-382312c3b4fe"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.832956 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" (UID: "1cb3e510-06f9-4bf3-9b5c-382312c3b4fe"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.832973 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-kube-api-access-gprzz" (OuterVolumeSpecName: "kube-api-access-gprzz") pod "1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" (UID: "1cb3e510-06f9-4bf3-9b5c-382312c3b4fe"). InnerVolumeSpecName "kube-api-access-gprzz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.925274 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.925328 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 16:45:03 crc kubenswrapper[5010]: I1126 16:45:03.925349 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gprzz\" (UniqueName: \"kubernetes.io/projected/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe-kube-api-access-gprzz\") on node \"crc\" DevicePath \"\"" Nov 26 16:45:04 crc kubenswrapper[5010]: I1126 16:45:04.115737 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" event={"ID":"1cb3e510-06f9-4bf3-9b5c-382312c3b4fe","Type":"ContainerDied","Data":"d0947dde1d000e8e4d625b16322d6f473db07d0135b5e14926486773f5346f14"} Nov 26 16:45:04 crc kubenswrapper[5010]: I1126 16:45:04.115807 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0947dde1d000e8e4d625b16322d6f473db07d0135b5e14926486773f5346f14" Nov 26 16:45:04 crc kubenswrapper[5010]: I1126 16:45:04.115860 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj" Nov 26 16:45:04 crc kubenswrapper[5010]: I1126 16:45:04.782363 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9"] Nov 26 16:45:04 crc kubenswrapper[5010]: I1126 16:45:04.789636 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402880-phdh9"] Nov 26 16:45:05 crc kubenswrapper[5010]: I1126 16:45:05.911126 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f410c598-4b4b-4f3b-b5cb-772ff71b0a80" path="/var/lib/kubelet/pods/f410c598-4b4b-4f3b-b5cb-772ff71b0a80/volumes" Nov 26 16:45:11 crc kubenswrapper[5010]: I1126 16:45:11.422824 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:45:11 crc kubenswrapper[5010]: I1126 16:45:11.424865 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:45:41 crc kubenswrapper[5010]: I1126 16:45:41.422482 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:45:41 crc kubenswrapper[5010]: I1126 16:45:41.423159 5010 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.377290 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mrkgj"] Nov 26 16:45:55 crc kubenswrapper[5010]: E1126 16:45:55.378152 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" containerName="collect-profiles" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.378165 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" containerName="collect-profiles" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.378316 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" containerName="collect-profiles" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.379268 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.404416 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrkgj"] Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.505217 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-utilities\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.505293 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-catalog-content\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.505340 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7vfnl\" (UniqueName: \"kubernetes.io/projected/1364c333-abfe-4947-83fc-cfe9adb0c252-kube-api-access-7vfnl\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.606780 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-catalog-content\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.606849 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7vfnl\" (UniqueName: \"kubernetes.io/projected/1364c333-abfe-4947-83fc-cfe9adb0c252-kube-api-access-7vfnl\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.606938 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-utilities\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.607618 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-catalog-content\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.607650 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-utilities\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.649251 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7vfnl\" (UniqueName: \"kubernetes.io/projected/1364c333-abfe-4947-83fc-cfe9adb0c252-kube-api-access-7vfnl\") pod \"redhat-marketplace-mrkgj\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:55 crc kubenswrapper[5010]: I1126 16:45:55.724998 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:45:56 crc kubenswrapper[5010]: I1126 16:45:56.063010 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrkgj"] Nov 26 16:45:56 crc kubenswrapper[5010]: W1126 16:45:56.068655 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1364c333_abfe_4947_83fc_cfe9adb0c252.slice/crio-6acab2b16cfd144f36f46c9b3bcc82eb35fa37aa3ff840e8d50760c5fb50a95d WatchSource:0}: Error finding container 6acab2b16cfd144f36f46c9b3bcc82eb35fa37aa3ff840e8d50760c5fb50a95d: Status 404 returned error can't find the container with id 6acab2b16cfd144f36f46c9b3bcc82eb35fa37aa3ff840e8d50760c5fb50a95d Nov 26 16:45:56 crc kubenswrapper[5010]: I1126 16:45:56.624482 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrkgj" event={"ID":"1364c333-abfe-4947-83fc-cfe9adb0c252","Type":"ContainerStarted","Data":"6acab2b16cfd144f36f46c9b3bcc82eb35fa37aa3ff840e8d50760c5fb50a95d"} Nov 26 16:45:57 crc kubenswrapper[5010]: I1126 16:45:57.638812 5010 generic.go:334] "Generic (PLEG): container finished" podID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerID="404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c" exitCode=0 Nov 26 16:45:57 crc kubenswrapper[5010]: I1126 16:45:57.638900 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrkgj" event={"ID":"1364c333-abfe-4947-83fc-cfe9adb0c252","Type":"ContainerDied","Data":"404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c"} Nov 26 16:45:59 crc kubenswrapper[5010]: I1126 16:45:59.662484 5010 generic.go:334] "Generic (PLEG): container finished" podID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerID="b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31" exitCode=0 Nov 26 16:45:59 crc kubenswrapper[5010]: I1126 
16:45:59.662556 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrkgj" event={"ID":"1364c333-abfe-4947-83fc-cfe9adb0c252","Type":"ContainerDied","Data":"b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31"} Nov 26 16:46:01 crc kubenswrapper[5010]: I1126 16:46:01.683913 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrkgj" event={"ID":"1364c333-abfe-4947-83fc-cfe9adb0c252","Type":"ContainerStarted","Data":"491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01"} Nov 26 16:46:01 crc kubenswrapper[5010]: I1126 16:46:01.714929 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mrkgj" podStartSLOduration=3.8786383239999997 podStartE2EDuration="6.714900141s" podCreationTimestamp="2025-11-26 16:45:55 +0000 UTC" firstStartedPulling="2025-11-26 16:45:57.643011387 +0000 UTC m=+4778.433728565" lastFinishedPulling="2025-11-26 16:46:00.479273204 +0000 UTC m=+4781.269990382" observedRunningTime="2025-11-26 16:46:01.707507667 +0000 UTC m=+4782.498224885" watchObservedRunningTime="2025-11-26 16:46:01.714900141 +0000 UTC m=+4782.505617329" Nov 26 16:46:04 crc kubenswrapper[5010]: I1126 16:46:04.651209 5010 scope.go:117] "RemoveContainer" containerID="205a4dfc01f88c6e24ba775b4a943f30d4dd93428b79c6c7bed725e7d8543f80" Nov 26 16:46:05 crc kubenswrapper[5010]: I1126 16:46:05.725122 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:46:05 crc kubenswrapper[5010]: I1126 16:46:05.725185 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:46:05 crc kubenswrapper[5010]: I1126 16:46:05.802114 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:46:06 crc kubenswrapper[5010]: I1126 16:46:06.818666 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:46:06 crc kubenswrapper[5010]: I1126 16:46:06.891538 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrkgj"] Nov 26 16:46:08 crc kubenswrapper[5010]: I1126 16:46:08.754906 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mrkgj" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="registry-server" containerID="cri-o://491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01" gracePeriod=2 Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.210583 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.336904 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-utilities\") pod \"1364c333-abfe-4947-83fc-cfe9adb0c252\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.336952 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-catalog-content\") pod \"1364c333-abfe-4947-83fc-cfe9adb0c252\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.337013 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vfnl\" (UniqueName: \"kubernetes.io/projected/1364c333-abfe-4947-83fc-cfe9adb0c252-kube-api-access-7vfnl\") pod \"1364c333-abfe-4947-83fc-cfe9adb0c252\" (UID: \"1364c333-abfe-4947-83fc-cfe9adb0c252\") " Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.338565 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-utilities" (OuterVolumeSpecName: "utilities") pod "1364c333-abfe-4947-83fc-cfe9adb0c252" (UID: "1364c333-abfe-4947-83fc-cfe9adb0c252"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.342525 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1364c333-abfe-4947-83fc-cfe9adb0c252-kube-api-access-7vfnl" (OuterVolumeSpecName: "kube-api-access-7vfnl") pod "1364c333-abfe-4947-83fc-cfe9adb0c252" (UID: "1364c333-abfe-4947-83fc-cfe9adb0c252"). InnerVolumeSpecName "kube-api-access-7vfnl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.354007 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1364c333-abfe-4947-83fc-cfe9adb0c252" (UID: "1364c333-abfe-4947-83fc-cfe9adb0c252"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.438197 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.438233 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1364c333-abfe-4947-83fc-cfe9adb0c252-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.438246 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vfnl\" (UniqueName: \"kubernetes.io/projected/1364c333-abfe-4947-83fc-cfe9adb0c252-kube-api-access-7vfnl\") on node \"crc\" DevicePath \"\"" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.767077 5010 generic.go:334] "Generic (PLEG): container finished" podID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerID="491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01" exitCode=0 Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.767192 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrkgj" event={"ID":"1364c333-abfe-4947-83fc-cfe9adb0c252","Type":"ContainerDied","Data":"491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01"} Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.767452 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mrkgj" event={"ID":"1364c333-abfe-4947-83fc-cfe9adb0c252","Type":"ContainerDied","Data":"6acab2b16cfd144f36f46c9b3bcc82eb35fa37aa3ff840e8d50760c5fb50a95d"} Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.767257 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mrkgj" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.767477 5010 scope.go:117] "RemoveContainer" containerID="491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.814579 5010 scope.go:117] "RemoveContainer" containerID="b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.828560 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrkgj"] Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.847018 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mrkgj"] Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.912405 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" path="/var/lib/kubelet/pods/1364c333-abfe-4947-83fc-cfe9adb0c252/volumes" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.914948 5010 scope.go:117] "RemoveContainer" containerID="404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.936907 5010 scope.go:117] "RemoveContainer" containerID="491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01" Nov 26 16:46:09 crc kubenswrapper[5010]: E1126 16:46:09.937386 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01\": container with ID starting with 491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01 not found: ID does not exist" containerID="491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.937450 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01"} err="failed to get container status \"491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01\": rpc error: code = NotFound desc = could not find container \"491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01\": container with ID starting with 491507c1860824ac5bca14398b5ee70e914f8e2d25d07c68e7ad1d8b24a66b01 not found: ID does not exist" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.937491 5010 scope.go:117] "RemoveContainer" containerID="b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31" Nov 26 16:46:09 crc kubenswrapper[5010]: E1126 16:46:09.937976 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31\": container with ID starting with b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31 not found: ID does not exist" containerID="b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.938040 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31"} err="failed to get container status \"b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31\": rpc error: code = NotFound desc = could not find container 
\"b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31\": container with ID starting with b3490a032afc827e57d1c11cb494a77ba452b1f80c05cc3b37bebbff10ac3c31 not found: ID does not exist" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.938077 5010 scope.go:117] "RemoveContainer" containerID="404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c" Nov 26 16:46:09 crc kubenswrapper[5010]: E1126 16:46:09.938517 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c\": container with ID starting with 404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c not found: ID does not exist" containerID="404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c" Nov 26 16:46:09 crc kubenswrapper[5010]: I1126 16:46:09.938561 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c"} err="failed to get container status \"404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c\": rpc error: code = NotFound desc = could not find container \"404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c\": container with ID starting with 404a7ab07a5efe61104de5d49e40090b0b976cd6b43b6be79e5ae25b1f3e829c not found: ID does not exist" Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.422864 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.422965 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.423041 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.423994 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"569ad5c2cdd4f1854f3e605f7633f5753ed4e4e3de2ce020afe82ef2326c961c"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.424096 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://569ad5c2cdd4f1854f3e605f7633f5753ed4e4e3de2ce020afe82ef2326c961c" gracePeriod=600 Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.795909 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="569ad5c2cdd4f1854f3e605f7633f5753ed4e4e3de2ce020afe82ef2326c961c" exitCode=0 Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.795969 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"569ad5c2cdd4f1854f3e605f7633f5753ed4e4e3de2ce020afe82ef2326c961c"} Nov 26 16:46:11 crc kubenswrapper[5010]: I1126 16:46:11.796501 5010 scope.go:117] "RemoveContainer" containerID="78620c9bfa4848f72b906481924a4f52d770aa3f0132d4c88384319b4345e3ff" Nov 26 16:46:12 crc kubenswrapper[5010]: I1126 16:46:12.808194 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c"} Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.066586 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 26 16:47:08 crc kubenswrapper[5010]: E1126 16:47:08.068477 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="registry-server" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.068510 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="registry-server" Nov 26 16:47:08 crc kubenswrapper[5010]: E1126 16:47:08.068549 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="extract-utilities" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.068567 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="extract-utilities" Nov 26 16:47:08 crc kubenswrapper[5010]: E1126 16:47:08.068618 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="extract-content" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.068636 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="extract-content" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.069040 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1364c333-abfe-4947-83fc-cfe9adb0c252" containerName="registry-server" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.071516 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.083618 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.199768 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-catalog-content\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.199992 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9wrm\" (UniqueName: \"kubernetes.io/projected/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-kube-api-access-h9wrm\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.200245 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-utilities\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.302587 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-utilities\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.302666 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-catalog-content\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.302805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9wrm\" (UniqueName: \"kubernetes.io/projected/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-kube-api-access-h9wrm\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.303451 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-utilities\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.303825 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-catalog-content\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.327751 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-h9wrm\" (UniqueName: \"kubernetes.io/projected/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-kube-api-access-h9wrm\") pod \"certified-operators-sm5mr\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.400724 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:08 crc kubenswrapper[5010]: I1126 16:47:08.981145 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 26 16:47:09 crc kubenswrapper[5010]: I1126 16:47:09.367919 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerStarted","Data":"b0dc9cad230e71f295bb573432d7eb28962d03fa7601483df0477cf7155d339c"} Nov 26 16:47:09 crc kubenswrapper[5010]: E1126 16:47:09.610791 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb54acd34_9bcf_4b3d_aea8_9b30e7dd6972.slice/crio-conmon-e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:47:10 crc kubenswrapper[5010]: I1126 16:47:10.380799 5010 generic.go:334] "Generic (PLEG): container finished" podID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerID="e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56" exitCode=0 Nov 26 16:47:10 crc kubenswrapper[5010]: I1126 16:47:10.380880 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerDied","Data":"e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56"} Nov 26 16:47:14 crc kubenswrapper[5010]: I1126 16:47:14.424109 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerStarted","Data":"32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8"} Nov 26 16:47:15 crc kubenswrapper[5010]: I1126 16:47:15.438413 5010 generic.go:334] "Generic (PLEG): container finished" podID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerID="32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8" exitCode=0 Nov 26 16:47:15 crc kubenswrapper[5010]: I1126 16:47:15.438476 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerDied","Data":"32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8"} Nov 26 16:47:16 crc kubenswrapper[5010]: I1126 16:47:16.453439 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerStarted","Data":"612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c"} Nov 26 16:47:16 crc kubenswrapper[5010]: I1126 16:47:16.480602 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sm5mr" podStartSLOduration=2.88848955 podStartE2EDuration="8.480582697s" podCreationTimestamp="2025-11-26 16:47:08 +0000 UTC" 
firstStartedPulling="2025-11-26 16:47:10.384145461 +0000 UTC m=+4851.174862639" lastFinishedPulling="2025-11-26 16:47:15.976238598 +0000 UTC m=+4856.766955786" observedRunningTime="2025-11-26 16:47:16.478019083 +0000 UTC m=+4857.268736271" watchObservedRunningTime="2025-11-26 16:47:16.480582697 +0000 UTC m=+4857.271299845" Nov 26 16:47:18 crc kubenswrapper[5010]: I1126 16:47:18.401425 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:18 crc kubenswrapper[5010]: I1126 16:47:18.403015 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:18 crc kubenswrapper[5010]: I1126 16:47:18.458473 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:28 crc kubenswrapper[5010]: I1126 16:47:28.451350 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:47:28 crc kubenswrapper[5010]: I1126 16:47:28.530922 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 26 16:47:28 crc kubenswrapper[5010]: I1126 16:47:28.563184 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:47:28 crc kubenswrapper[5010]: I1126 16:47:28.563472 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-4mtn9" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="registry-server" containerID="cri-o://8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255" gracePeriod=2 Nov 26 16:47:28 crc kubenswrapper[5010]: I1126 16:47:28.929007 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.035986 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-catalog-content\") pod \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.036042 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-utilities\") pod \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.036122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gm8b\" (UniqueName: \"kubernetes.io/projected/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-kube-api-access-9gm8b\") pod \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\" (UID: \"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd\") " Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.037018 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-utilities" (OuterVolumeSpecName: "utilities") pod "4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" (UID: "4c9c65d3-d16f-49f0-8d23-82a24ad65fcd"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.043668 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-kube-api-access-9gm8b" (OuterVolumeSpecName: "kube-api-access-9gm8b") pod "4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" (UID: "4c9c65d3-d16f-49f0-8d23-82a24ad65fcd"). InnerVolumeSpecName "kube-api-access-9gm8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.088412 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" (UID: "4c9c65d3-d16f-49f0-8d23-82a24ad65fcd"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.137595 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.137640 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.137654 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gm8b\" (UniqueName: \"kubernetes.io/projected/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd-kube-api-access-9gm8b\") on node \"crc\" DevicePath \"\"" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.582927 5010 generic.go:334] "Generic (PLEG): container finished" podID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerID="8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255" exitCode=0 Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.583006 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4mtn9" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.582998 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerDied","Data":"8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255"} Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.583363 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4mtn9" event={"ID":"4c9c65d3-d16f-49f0-8d23-82a24ad65fcd","Type":"ContainerDied","Data":"5a85ced1d63bd1c6936764f3e7a7d8418adbba5bd741a0dea9a9f55480f39067"} Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.583388 5010 scope.go:117] "RemoveContainer" containerID="8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.605946 5010 scope.go:117] "RemoveContainer" containerID="5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.619137 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.627604 5010 scope.go:117] "RemoveContainer" containerID="241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.633272 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4mtn9"] Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.659372 5010 scope.go:117] "RemoveContainer" containerID="8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255" Nov 26 16:47:29 crc kubenswrapper[5010]: E1126 16:47:29.659921 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255\": container with ID starting with 8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255 not found: ID does not exist" containerID="8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.659950 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255"} err="failed to get container status \"8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255\": rpc error: code = NotFound desc = could not find container \"8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255\": container with ID starting with 8c0343146afc7896596ae9c44e9725974e5fedbec94fbfcf1213590d16cec255 not found: ID does not exist" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.659971 5010 scope.go:117] "RemoveContainer" containerID="5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857" Nov 26 16:47:29 crc kubenswrapper[5010]: E1126 16:47:29.660458 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857\": container with ID starting with 5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857 not found: ID does not exist" containerID="5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.660478 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857"} err="failed to get container status \"5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857\": rpc error: code = NotFound desc = could not find container \"5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857\": container with ID starting with 5fc6e7778e2b6797ebf191ddc9e5d76e4952cdb3c989ed7bb4b1164a88217857 not found: ID does not exist" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.660491 5010 scope.go:117] "RemoveContainer" containerID="241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759" Nov 26 16:47:29 crc kubenswrapper[5010]: E1126 16:47:29.660819 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759\": container with ID starting with 241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759 not found: ID does not exist" containerID="241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.660844 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759"} err="failed to get container status \"241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759\": rpc error: code = NotFound desc = could not find container \"241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759\": container with ID starting with 241efb04cdd4843c6efb52886ae32a2e10add315c6c5f7a145a4d8c36ea6e759 not found: ID does not exist" Nov 26 16:47:29 crc kubenswrapper[5010]: I1126 16:47:29.899442 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" path="/var/lib/kubelet/pods/4c9c65d3-d16f-49f0-8d23-82a24ad65fcd/volumes" Nov 26 16:48:11 crc kubenswrapper[5010]: I1126 16:48:11.422693 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:48:11 crc kubenswrapper[5010]: I1126 16:48:11.423317 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:48:41 crc kubenswrapper[5010]: I1126 16:48:41.422334 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:48:41 crc kubenswrapper[5010]: I1126 16:48:41.423115 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 
16:49:11.423581 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.424243 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.424311 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.425309 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.425415 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" gracePeriod=600 Nov 26 16:49:11 crc kubenswrapper[5010]: E1126 16:49:11.553958 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.574565 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" exitCode=0 Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.574631 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c"} Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.574672 5010 scope.go:117] "RemoveContainer" containerID="569ad5c2cdd4f1854f3e605f7633f5753ed4e4e3de2ce020afe82ef2326c961c" Nov 26 16:49:11 crc kubenswrapper[5010]: I1126 16:49:11.575775 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:49:11 crc kubenswrapper[5010]: E1126 16:49:11.576343 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:49:24 crc kubenswrapper[5010]: I1126 16:49:24.892123 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:49:24 crc kubenswrapper[5010]: E1126 16:49:24.893366 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:49:38 crc kubenswrapper[5010]: I1126 16:49:38.892424 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:49:38 crc kubenswrapper[5010]: E1126 16:49:38.893564 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:49:49 crc kubenswrapper[5010]: I1126 16:49:49.898231 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:49:49 crc kubenswrapper[5010]: E1126 16:49:49.898867 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.075760 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-fxchh"] Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.087090 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-fxchh"] Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.206243 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-lg77b"] Nov 26 16:50:00 crc kubenswrapper[5010]: E1126 16:50:00.206890 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="extract-content" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.206927 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="extract-content" Nov 26 16:50:00 crc kubenswrapper[5010]: E1126 16:50:00.206949 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="extract-utilities" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.206967 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="extract-utilities" Nov 26 16:50:00 crc kubenswrapper[5010]: E1126 16:50:00.207005 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="registry-server" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.207024 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="registry-server" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.207376 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c9c65d3-d16f-49f0-8d23-82a24ad65fcd" containerName="registry-server" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.208489 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.213894 5010 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-pb5m9" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.214429 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.215176 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.215788 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.221901 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-lg77b"] Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.311280 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/26e21179-4f22-4b05-b2d0-200ddeb003c4-crc-storage\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.311413 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/26e21179-4f22-4b05-b2d0-200ddeb003c4-node-mnt\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.311595 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf5dr\" (UniqueName: \"kubernetes.io/projected/26e21179-4f22-4b05-b2d0-200ddeb003c4-kube-api-access-zf5dr\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.412813 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/26e21179-4f22-4b05-b2d0-200ddeb003c4-node-mnt\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.412907 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf5dr\" (UniqueName: \"kubernetes.io/projected/26e21179-4f22-4b05-b2d0-200ddeb003c4-kube-api-access-zf5dr\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.412952 5010 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/26e21179-4f22-4b05-b2d0-200ddeb003c4-crc-storage\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.413499 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/26e21179-4f22-4b05-b2d0-200ddeb003c4-node-mnt\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.413680 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/26e21179-4f22-4b05-b2d0-200ddeb003c4-crc-storage\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.447855 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf5dr\" (UniqueName: \"kubernetes.io/projected/26e21179-4f22-4b05-b2d0-200ddeb003c4-kube-api-access-zf5dr\") pod \"crc-storage-crc-lg77b\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.547604 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:00 crc kubenswrapper[5010]: I1126 16:50:00.891859 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:50:00 crc kubenswrapper[5010]: E1126 16:50:00.892483 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:50:01 crc kubenswrapper[5010]: I1126 16:50:01.003738 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-lg77b"] Nov 26 16:50:01 crc kubenswrapper[5010]: I1126 16:50:01.017016 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:50:01 crc kubenswrapper[5010]: I1126 16:50:01.054292 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-lg77b" event={"ID":"26e21179-4f22-4b05-b2d0-200ddeb003c4","Type":"ContainerStarted","Data":"ac4bef3c868155ec74696a30bba8dfb2a359c3c636d35363a007405538cd2662"} Nov 26 16:50:01 crc kubenswrapper[5010]: I1126 16:50:01.902227 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1757a1c9-a540-48c7-9943-19e8bc559556" path="/var/lib/kubelet/pods/1757a1c9-a540-48c7-9943-19e8bc559556/volumes" Nov 26 16:50:02 crc kubenswrapper[5010]: I1126 16:50:02.068817 5010 generic.go:334] "Generic (PLEG): container finished" podID="26e21179-4f22-4b05-b2d0-200ddeb003c4" containerID="684fd4866228c14fd55457446cfd74dcb37f9fd5c9d6bd860f5bd253b2726a62" exitCode=0 Nov 26 16:50:02 crc kubenswrapper[5010]: I1126 16:50:02.068885 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-lg77b" 
event={"ID":"26e21179-4f22-4b05-b2d0-200ddeb003c4","Type":"ContainerDied","Data":"684fd4866228c14fd55457446cfd74dcb37f9fd5c9d6bd860f5bd253b2726a62"} Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.434779 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.561769 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zf5dr\" (UniqueName: \"kubernetes.io/projected/26e21179-4f22-4b05-b2d0-200ddeb003c4-kube-api-access-zf5dr\") pod \"26e21179-4f22-4b05-b2d0-200ddeb003c4\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.561859 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/26e21179-4f22-4b05-b2d0-200ddeb003c4-node-mnt\") pod \"26e21179-4f22-4b05-b2d0-200ddeb003c4\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.561906 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/26e21179-4f22-4b05-b2d0-200ddeb003c4-crc-storage\") pod \"26e21179-4f22-4b05-b2d0-200ddeb003c4\" (UID: \"26e21179-4f22-4b05-b2d0-200ddeb003c4\") " Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.562049 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/26e21179-4f22-4b05-b2d0-200ddeb003c4-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "26e21179-4f22-4b05-b2d0-200ddeb003c4" (UID: "26e21179-4f22-4b05-b2d0-200ddeb003c4"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.562175 5010 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/26e21179-4f22-4b05-b2d0-200ddeb003c4-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.569475 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26e21179-4f22-4b05-b2d0-200ddeb003c4-kube-api-access-zf5dr" (OuterVolumeSpecName: "kube-api-access-zf5dr") pod "26e21179-4f22-4b05-b2d0-200ddeb003c4" (UID: "26e21179-4f22-4b05-b2d0-200ddeb003c4"). InnerVolumeSpecName "kube-api-access-zf5dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.594213 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/26e21179-4f22-4b05-b2d0-200ddeb003c4-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "26e21179-4f22-4b05-b2d0-200ddeb003c4" (UID: "26e21179-4f22-4b05-b2d0-200ddeb003c4"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.663917 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zf5dr\" (UniqueName: \"kubernetes.io/projected/26e21179-4f22-4b05-b2d0-200ddeb003c4-kube-api-access-zf5dr\") on node \"crc\" DevicePath \"\"" Nov 26 16:50:03 crc kubenswrapper[5010]: I1126 16:50:03.663968 5010 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/26e21179-4f22-4b05-b2d0-200ddeb003c4-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 16:50:04 crc kubenswrapper[5010]: I1126 16:50:04.085274 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-lg77b" event={"ID":"26e21179-4f22-4b05-b2d0-200ddeb003c4","Type":"ContainerDied","Data":"ac4bef3c868155ec74696a30bba8dfb2a359c3c636d35363a007405538cd2662"} Nov 26 16:50:04 crc kubenswrapper[5010]: I1126 16:50:04.085321 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-lg77b" Nov 26 16:50:04 crc kubenswrapper[5010]: I1126 16:50:04.085335 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac4bef3c868155ec74696a30bba8dfb2a359c3c636d35363a007405538cd2662" Nov 26 16:50:04 crc kubenswrapper[5010]: I1126 16:50:04.837225 5010 scope.go:117] "RemoveContainer" containerID="0e2d25cd12a82c87aa75f3fc80455038c19edb34d3d9698056e6b3d77b2df691" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.745961 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["crc-storage/crc-storage-crc-lg77b"] Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.756698 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["crc-storage/crc-storage-crc-lg77b"] Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.909673 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26e21179-4f22-4b05-b2d0-200ddeb003c4" path="/var/lib/kubelet/pods/26e21179-4f22-4b05-b2d0-200ddeb003c4/volumes" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.921142 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-zbgqt"] Nov 26 16:50:05 crc kubenswrapper[5010]: E1126 16:50:05.921690 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26e21179-4f22-4b05-b2d0-200ddeb003c4" containerName="storage" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.921752 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="26e21179-4f22-4b05-b2d0-200ddeb003c4" containerName="storage" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.922081 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="26e21179-4f22-4b05-b2d0-200ddeb003c4" containerName="storage" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.923023 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.929796 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.930205 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.932444 5010 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-pb5m9" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.932477 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.933753 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-zbgqt"] Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.998196 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2259a221-7e89-4e4f-bf9a-9e63a307a441-node-mnt\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.998535 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2259a221-7e89-4e4f-bf9a-9e63a307a441-crc-storage\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:05 crc kubenswrapper[5010]: I1126 16:50:05.998979 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vb5g6\" (UniqueName: \"kubernetes.io/projected/2259a221-7e89-4e4f-bf9a-9e63a307a441-kube-api-access-vb5g6\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.101140 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vb5g6\" (UniqueName: \"kubernetes.io/projected/2259a221-7e89-4e4f-bf9a-9e63a307a441-kube-api-access-vb5g6\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.101270 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2259a221-7e89-4e4f-bf9a-9e63a307a441-node-mnt\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.101485 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2259a221-7e89-4e4f-bf9a-9e63a307a441-crc-storage\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.101993 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2259a221-7e89-4e4f-bf9a-9e63a307a441-node-mnt\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " 
pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.102529 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2259a221-7e89-4e4f-bf9a-9e63a307a441-crc-storage\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.138742 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vb5g6\" (UniqueName: \"kubernetes.io/projected/2259a221-7e89-4e4f-bf9a-9e63a307a441-kube-api-access-vb5g6\") pod \"crc-storage-crc-zbgqt\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.255429 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:06 crc kubenswrapper[5010]: I1126 16:50:06.566692 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-zbgqt"] Nov 26 16:50:06 crc kubenswrapper[5010]: W1126 16:50:06.569841 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2259a221_7e89_4e4f_bf9a_9e63a307a441.slice/crio-8f062b1d6efc5bc4028fdd2a83b127f9678a2f6d6b1ac29ef8a2b9b3ff2197f5 WatchSource:0}: Error finding container 8f062b1d6efc5bc4028fdd2a83b127f9678a2f6d6b1ac29ef8a2b9b3ff2197f5: Status 404 returned error can't find the container with id 8f062b1d6efc5bc4028fdd2a83b127f9678a2f6d6b1ac29ef8a2b9b3ff2197f5 Nov 26 16:50:07 crc kubenswrapper[5010]: I1126 16:50:07.121305 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-zbgqt" event={"ID":"2259a221-7e89-4e4f-bf9a-9e63a307a441","Type":"ContainerStarted","Data":"8f062b1d6efc5bc4028fdd2a83b127f9678a2f6d6b1ac29ef8a2b9b3ff2197f5"} Nov 26 16:50:08 crc kubenswrapper[5010]: I1126 16:50:08.131014 5010 generic.go:334] "Generic (PLEG): container finished" podID="2259a221-7e89-4e4f-bf9a-9e63a307a441" containerID="a18a5484c77745fcc586047e1862b361a406eca3b471d87dd6537bd259659a0b" exitCode=0 Nov 26 16:50:08 crc kubenswrapper[5010]: I1126 16:50:08.131108 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-zbgqt" event={"ID":"2259a221-7e89-4e4f-bf9a-9e63a307a441","Type":"ContainerDied","Data":"a18a5484c77745fcc586047e1862b361a406eca3b471d87dd6537bd259659a0b"} Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.439917 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.554739 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2259a221-7e89-4e4f-bf9a-9e63a307a441-crc-storage\") pod \"2259a221-7e89-4e4f-bf9a-9e63a307a441\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.554793 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2259a221-7e89-4e4f-bf9a-9e63a307a441-node-mnt\") pod \"2259a221-7e89-4e4f-bf9a-9e63a307a441\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.554922 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vb5g6\" (UniqueName: \"kubernetes.io/projected/2259a221-7e89-4e4f-bf9a-9e63a307a441-kube-api-access-vb5g6\") pod \"2259a221-7e89-4e4f-bf9a-9e63a307a441\" (UID: \"2259a221-7e89-4e4f-bf9a-9e63a307a441\") " Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.555017 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2259a221-7e89-4e4f-bf9a-9e63a307a441-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "2259a221-7e89-4e4f-bf9a-9e63a307a441" (UID: "2259a221-7e89-4e4f-bf9a-9e63a307a441"). InnerVolumeSpecName "node-mnt". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.555384 5010 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/2259a221-7e89-4e4f-bf9a-9e63a307a441-node-mnt\") on node \"crc\" DevicePath \"\"" Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.562229 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2259a221-7e89-4e4f-bf9a-9e63a307a441-kube-api-access-vb5g6" (OuterVolumeSpecName: "kube-api-access-vb5g6") pod "2259a221-7e89-4e4f-bf9a-9e63a307a441" (UID: "2259a221-7e89-4e4f-bf9a-9e63a307a441"). InnerVolumeSpecName "kube-api-access-vb5g6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.598397 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2259a221-7e89-4e4f-bf9a-9e63a307a441-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "2259a221-7e89-4e4f-bf9a-9e63a307a441" (UID: "2259a221-7e89-4e4f-bf9a-9e63a307a441"). InnerVolumeSpecName "crc-storage". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.656725 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vb5g6\" (UniqueName: \"kubernetes.io/projected/2259a221-7e89-4e4f-bf9a-9e63a307a441-kube-api-access-vb5g6\") on node \"crc\" DevicePath \"\"" Nov 26 16:50:09 crc kubenswrapper[5010]: I1126 16:50:09.656769 5010 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/2259a221-7e89-4e4f-bf9a-9e63a307a441-crc-storage\") on node \"crc\" DevicePath \"\"" Nov 26 16:50:10 crc kubenswrapper[5010]: I1126 16:50:10.149487 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-zbgqt" event={"ID":"2259a221-7e89-4e4f-bf9a-9e63a307a441","Type":"ContainerDied","Data":"8f062b1d6efc5bc4028fdd2a83b127f9678a2f6d6b1ac29ef8a2b9b3ff2197f5"} Nov 26 16:50:10 crc kubenswrapper[5010]: I1126 16:50:10.149538 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f062b1d6efc5bc4028fdd2a83b127f9678a2f6d6b1ac29ef8a2b9b3ff2197f5" Nov 26 16:50:10 crc kubenswrapper[5010]: I1126 16:50:10.149551 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-zbgqt" Nov 26 16:50:14 crc kubenswrapper[5010]: I1126 16:50:14.891852 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:50:14 crc kubenswrapper[5010]: E1126 16:50:14.893094 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:50:26 crc kubenswrapper[5010]: I1126 16:50:26.892783 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:50:26 crc kubenswrapper[5010]: E1126 16:50:26.893964 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:50:39 crc kubenswrapper[5010]: I1126 16:50:39.900507 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:50:39 crc kubenswrapper[5010]: E1126 16:50:39.901815 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:50:50 crc kubenswrapper[5010]: I1126 16:50:50.891947 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:50:50 crc kubenswrapper[5010]: E1126 
16:50:50.893052 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:51:02 crc kubenswrapper[5010]: I1126 16:51:02.892269 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:51:02 crc kubenswrapper[5010]: E1126 16:51:02.893655 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:51:14 crc kubenswrapper[5010]: I1126 16:51:14.891592 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:51:14 crc kubenswrapper[5010]: E1126 16:51:14.893631 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:51:28 crc kubenswrapper[5010]: I1126 16:51:28.891969 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:51:28 crc kubenswrapper[5010]: E1126 16:51:28.892991 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:51:40 crc kubenswrapper[5010]: I1126 16:51:40.892030 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:51:40 crc kubenswrapper[5010]: E1126 16:51:40.892914 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:51:52 crc kubenswrapper[5010]: I1126 16:51:52.892051 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:51:52 crc kubenswrapper[5010]: E1126 16:51:52.893150 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:52:04 crc kubenswrapper[5010]: I1126 16:52:04.891108 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:52:04 crc kubenswrapper[5010]: E1126 16:52:04.891822 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.315363 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-99jjz"] Nov 26 16:52:10 crc kubenswrapper[5010]: E1126 16:52:10.316101 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2259a221-7e89-4e4f-bf9a-9e63a307a441" containerName="storage" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.316112 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2259a221-7e89-4e4f-bf9a-9e63a307a441" containerName="storage" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.316274 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2259a221-7e89-4e4f-bf9a-9e63a307a441" containerName="storage" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.316986 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.318586 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.318764 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.318783 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-gh2kc" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.324428 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.336609 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-99jjz"] Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.343218 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-w4257"] Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.344580 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.348369 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.358606 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-w4257"] Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.405253 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs87l\" (UniqueName: \"kubernetes.io/projected/902a83c5-7077-4310-95e0-22892fc63caf-kube-api-access-bs87l\") pod \"dnsmasq-dns-866449bdb9-99jjz\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.405532 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/902a83c5-7077-4310-95e0-22892fc63caf-config\") pod \"dnsmasq-dns-866449bdb9-99jjz\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.506848 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/902a83c5-7077-4310-95e0-22892fc63caf-config\") pod \"dnsmasq-dns-866449bdb9-99jjz\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.507096 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-config\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.507187 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-dns-svc\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.507281 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljbjf\" (UniqueName: \"kubernetes.io/projected/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-kube-api-access-ljbjf\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.507392 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs87l\" (UniqueName: \"kubernetes.io/projected/902a83c5-7077-4310-95e0-22892fc63caf-kube-api-access-bs87l\") pod \"dnsmasq-dns-866449bdb9-99jjz\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.508421 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/902a83c5-7077-4310-95e0-22892fc63caf-config\") pod \"dnsmasq-dns-866449bdb9-99jjz\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " pod="openstack/dnsmasq-dns-866449bdb9-99jjz" 
Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.545609 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs87l\" (UniqueName: \"kubernetes.io/projected/902a83c5-7077-4310-95e0-22892fc63caf-kube-api-access-bs87l\") pod \"dnsmasq-dns-866449bdb9-99jjz\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.609255 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-config\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.609306 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-dns-svc\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.609350 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljbjf\" (UniqueName: \"kubernetes.io/projected/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-kube-api-access-ljbjf\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.610178 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-config\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.610465 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-dns-svc\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.626176 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljbjf\" (UniqueName: \"kubernetes.io/projected/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-kube-api-access-ljbjf\") pod \"dnsmasq-dns-55c86457d7-w4257\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.633183 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.657697 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.876089 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-w4257"] Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.911521 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-l2nwx"] Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.913095 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.917482 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-l2nwx"] Nov 26 16:52:10 crc kubenswrapper[5010]: I1126 16:52:10.926235 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-99jjz"] Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.018541 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78rbl\" (UniqueName: \"kubernetes.io/projected/6b876b1b-eec1-485c-a227-5fa05c22add7-kube-api-access-78rbl\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.018757 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-dns-svc\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.018829 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-config\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.120209 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-dns-svc\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.120277 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-config\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.120329 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78rbl\" (UniqueName: \"kubernetes.io/projected/6b876b1b-eec1-485c-a227-5fa05c22add7-kube-api-access-78rbl\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.121441 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-dns-svc\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.121479 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-config\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.143618 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78rbl\" (UniqueName: \"kubernetes.io/projected/6b876b1b-eec1-485c-a227-5fa05c22add7-kube-api-access-78rbl\") pod \"dnsmasq-dns-f4c6c447c-l2nwx\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.258355 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-99jjz"] Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.261534 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.293664 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-w4257"] Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.315300 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" event={"ID":"902a83c5-7077-4310-95e0-22892fc63caf","Type":"ContainerStarted","Data":"926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7"} Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.315344 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" event={"ID":"902a83c5-7077-4310-95e0-22892fc63caf","Type":"ContainerStarted","Data":"7d37cdaa5ef1d87681556af618da755a3068232b2e928bcb8432a0d7521aa5c8"} Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.319149 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-964dp"] Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.320669 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: W1126 16:52:11.353739 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3a2c86b5_bf9c_493d_8f93_99d2e2e94e4b.slice/crio-27dd146d0f49814b07f4a69a28718615f6daa734a194c038d486ef1b4a14cc19 WatchSource:0}: Error finding container 27dd146d0f49814b07f4a69a28718615f6daa734a194c038d486ef1b4a14cc19: Status 404 returned error can't find the container with id 27dd146d0f49814b07f4a69a28718615f6daa734a194c038d486ef1b4a14cc19 Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.354418 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-964dp"] Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.425542 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-config\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.425591 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d9s8p\" (UniqueName: \"kubernetes.io/projected/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-kube-api-access-d9s8p\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.425680 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-dns-svc\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.527227 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-dns-svc\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.527296 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-config\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.527333 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d9s8p\" (UniqueName: \"kubernetes.io/projected/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-kube-api-access-d9s8p\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.528021 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-dns-svc\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.528212 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-config\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.541971 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d9s8p\" (UniqueName: \"kubernetes.io/projected/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-kube-api-access-d9s8p\") pod \"dnsmasq-dns-59c6c64b5c-964dp\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.668511 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:11 crc kubenswrapper[5010]: I1126 16:52:11.777770 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-l2nwx"] Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.043337 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.044891 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.046816 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-2kpf2" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.047039 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.047137 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.047229 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.047363 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.047491 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.047612 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.060371 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.138243 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e9eb49e1-ceca-4317-bd3d-8074787001e4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.138399 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z29jx\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-kube-api-access-z29jx\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.138505 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.138601 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.138904 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.139257 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.139325 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.139427 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.139533 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e9eb49e1-ceca-4317-bd3d-8074787001e4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.139597 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.139892 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.143510 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-964dp"] Nov 26 16:52:12 crc kubenswrapper[5010]: W1126 16:52:12.144227 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67e2ac4e_3adc_4d5b_bd12_20f6c705bf1d.slice/crio-40a589fe8a1c2928856bf675fcfde86280230348dece1fdd7644ea34a9a97e36 WatchSource:0}: Error finding container 40a589fe8a1c2928856bf675fcfde86280230348dece1fdd7644ea34a9a97e36: Status 404 returned error can't find the container with id 40a589fe8a1c2928856bf675fcfde86280230348dece1fdd7644ea34a9a97e36 Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240786 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e9eb49e1-ceca-4317-bd3d-8074787001e4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240823 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-z29jx\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-kube-api-access-z29jx\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240851 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240880 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240903 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240928 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240943 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.240975 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.241007 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e9eb49e1-ceca-4317-bd3d-8074787001e4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.241025 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.241040 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.241456 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.242517 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.242595 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.243765 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.244077 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.247470 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.247722 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e9eb49e1-ceca-4317-bd3d-8074787001e4-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.248104 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e9eb49e1-ceca-4317-bd3d-8074787001e4-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.249057 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 
16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.250535 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.250575 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2498bea4bb787ce3fa7dbd459d0da0f5571f9929b0ff125c01dcb0b60df1edb8/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.263937 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z29jx\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-kube-api-access-z29jx\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.282471 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.326185 5010 generic.go:334] "Generic (PLEG): container finished" podID="3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" containerID="7eacaa3a5bed25fcc2888e0a66344060915953f674acf318cb29e861d926314c" exitCode=0 Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.326398 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c86457d7-w4257" event={"ID":"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b","Type":"ContainerDied","Data":"7eacaa3a5bed25fcc2888e0a66344060915953f674acf318cb29e861d926314c"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.326430 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c86457d7-w4257" event={"ID":"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b","Type":"ContainerStarted","Data":"27dd146d0f49814b07f4a69a28718615f6daa734a194c038d486ef1b4a14cc19"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.332477 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" event={"ID":"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d","Type":"ContainerStarted","Data":"983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.332586 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" event={"ID":"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d","Type":"ContainerStarted","Data":"40a589fe8a1c2928856bf675fcfde86280230348dece1fdd7644ea34a9a97e36"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.334754 5010 generic.go:334] "Generic (PLEG): container finished" podID="902a83c5-7077-4310-95e0-22892fc63caf" containerID="926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7" exitCode=0 Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.334831 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" 
event={"ID":"902a83c5-7077-4310-95e0-22892fc63caf","Type":"ContainerDied","Data":"926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.334852 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" podUID="902a83c5-7077-4310-95e0-22892fc63caf" containerName="dnsmasq-dns" containerID="cri-o://dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236" gracePeriod=10 Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.334860 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" event={"ID":"902a83c5-7077-4310-95e0-22892fc63caf","Type":"ContainerStarted","Data":"dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.334992 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.344276 5010 generic.go:334] "Generic (PLEG): container finished" podID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerID="92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc" exitCode=0 Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.344312 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" event={"ID":"6b876b1b-eec1-485c-a227-5fa05c22add7","Type":"ContainerDied","Data":"92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.344334 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" event={"ID":"6b876b1b-eec1-485c-a227-5fa05c22add7","Type":"ContainerStarted","Data":"40ac590e501f763410cc788892df93833f3b9eba916ab4221180135f23d2ff16"} Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.363727 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" podStartSLOduration=2.363696229 podStartE2EDuration="2.363696229s" podCreationTimestamp="2025-11-26 16:52:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:12.353460124 +0000 UTC m=+5153.144177282" watchObservedRunningTime="2025-11-26 16:52:12.363696229 +0000 UTC m=+5153.154413377" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.374985 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.424669 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.429411 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.437556 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.437876 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-k2czn" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.438023 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.438158 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.439811 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.440403 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.440605 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.471915 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:52:12 crc kubenswrapper[5010]: E1126 16:52:12.540440 5010 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Nov 26 16:52:12 crc kubenswrapper[5010]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/6b876b1b-eec1-485c-a227-5fa05c22add7/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 26 16:52:12 crc kubenswrapper[5010]: > podSandboxID="40ac590e501f763410cc788892df93833f3b9eba916ab4221180135f23d2ff16" Nov 26 16:52:12 crc kubenswrapper[5010]: E1126 16:52:12.540612 5010 kuberuntime_manager.go:1274] "Unhandled Error" err=< Nov 26 16:52:12 crc kubenswrapper[5010]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nb6hc5h68h68h594h659hdbh679h65ch5f6hdch6h5b9h8fh55hfhf8h57fhc7h56ch687h669h559h678h5dhc7hf7h697h5d6h9ch669h54fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-78rbl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-f4c6c447c-l2nwx_openstack(6b876b1b-eec1-485c-a227-5fa05c22add7): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/6b876b1b-eec1-485c-a227-5fa05c22add7/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Nov 26 16:52:12 crc kubenswrapper[5010]: > logger="UnhandledError" Nov 26 16:52:12 crc kubenswrapper[5010]: E1126 16:52:12.541937 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/6b876b1b-eec1-485c-a227-5fa05c22add7/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557213 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557258 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557287 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-server-conf\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557319 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557349 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557369 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557391 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdz45\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-kube-api-access-jdz45\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557409 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557428 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/57796a15-e055-4685-bfe9-83da8320be25-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557444 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-config-data\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.557459 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/57796a15-e055-4685-bfe9-83da8320be25-pod-info\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.659476 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.659551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.660592 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-server-conf\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661179 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661263 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661314 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661348 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdz45\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-kube-api-access-jdz45\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661371 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-confd\") pod 
\"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661419 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/57796a15-e055-4685-bfe9-83da8320be25-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661450 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-config-data\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.661497 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/57796a15-e055-4685-bfe9-83da8320be25-pod-info\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.662343 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-server-conf\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.663168 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.663365 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.664042 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-config-data\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.667524 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.667584 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/57796a15-e055-4685-bfe9-83da8320be25-pod-info\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.668856 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.668901 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ae0b79e21bdf677e9addbce9b06dc5ea6a1fabdcef32b98f2493c75fdcd03f4/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.672425 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.673035 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/57796a15-e055-4685-bfe9-83da8320be25-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.685205 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdz45\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-kube-api-access-jdz45\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.719478 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.732378 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.792784 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.796184 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.863755 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljbjf\" (UniqueName: \"kubernetes.io/projected/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-kube-api-access-ljbjf\") pod \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.863859 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/902a83c5-7077-4310-95e0-22892fc63caf-config\") pod \"902a83c5-7077-4310-95e0-22892fc63caf\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.863910 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-config\") pod \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.866277 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs87l\" (UniqueName: \"kubernetes.io/projected/902a83c5-7077-4310-95e0-22892fc63caf-kube-api-access-bs87l\") pod \"902a83c5-7077-4310-95e0-22892fc63caf\" (UID: \"902a83c5-7077-4310-95e0-22892fc63caf\") " Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.866337 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-dns-svc\") pod \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\" (UID: \"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b\") " Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.868053 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-kube-api-access-ljbjf" (OuterVolumeSpecName: "kube-api-access-ljbjf") pod "3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" (UID: "3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b"). InnerVolumeSpecName "kube-api-access-ljbjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.869779 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/902a83c5-7077-4310-95e0-22892fc63caf-kube-api-access-bs87l" (OuterVolumeSpecName: "kube-api-access-bs87l") pod "902a83c5-7077-4310-95e0-22892fc63caf" (UID: "902a83c5-7077-4310-95e0-22892fc63caf"). InnerVolumeSpecName "kube-api-access-bs87l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.883033 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" (UID: "3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.902753 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-config" (OuterVolumeSpecName: "config") pod "3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" (UID: "3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.917637 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/902a83c5-7077-4310-95e0-22892fc63caf-config" (OuterVolumeSpecName: "config") pod "902a83c5-7077-4310-95e0-22892fc63caf" (UID: "902a83c5-7077-4310-95e0-22892fc63caf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.932392 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.971593 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljbjf\" (UniqueName: \"kubernetes.io/projected/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-kube-api-access-ljbjf\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.971618 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/902a83c5-7077-4310-95e0-22892fc63caf-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.971628 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.971638 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs87l\" (UniqueName: \"kubernetes.io/projected/902a83c5-7077-4310-95e0-22892fc63caf-kube-api-access-bs87l\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:12 crc kubenswrapper[5010]: I1126 16:52:12.971646 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.019418 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 26 16:52:13 crc kubenswrapper[5010]: E1126 16:52:13.019862 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="902a83c5-7077-4310-95e0-22892fc63caf" containerName="init" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.019884 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="902a83c5-7077-4310-95e0-22892fc63caf" containerName="init" Nov 26 16:52:13 crc kubenswrapper[5010]: E1126 16:52:13.019929 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" containerName="init" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.019940 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" containerName="init" Nov 26 16:52:13 crc kubenswrapper[5010]: E1126 16:52:13.019965 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="902a83c5-7077-4310-95e0-22892fc63caf" containerName="dnsmasq-dns" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.019974 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="902a83c5-7077-4310-95e0-22892fc63caf" containerName="dnsmasq-dns" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.020141 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" containerName="init" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.020169 5010 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="902a83c5-7077-4310-95e0-22892fc63caf" containerName="dnsmasq-dns" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.022611 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.028358 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.028633 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-w6dqt" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.028849 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.029058 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.036528 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.036876 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.174995 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175101 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175164 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-kolla-config\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175219 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qhkp\" (UniqueName: \"kubernetes.io/projected/4456ea0d-01da-4a0a-b918-db686f0e23aa-kube-api-access-4qhkp\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175252 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4456ea0d-01da-4a0a-b918-db686f0e23aa-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175325 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-config-data-default\") pod 
\"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175395 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4456ea0d-01da-4a0a-b918-db686f0e23aa-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.175498 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4456ea0d-01da-4a0a-b918-db686f0e23aa-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.229602 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:52:13 crc kubenswrapper[5010]: W1126 16:52:13.234982 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod57796a15_e055_4685_bfe9_83da8320be25.slice/crio-6082ef88a32c79dc7b8cab3275c2457e56538ada693f4b4c357a87e3558b197a WatchSource:0}: Error finding container 6082ef88a32c79dc7b8cab3275c2457e56538ada693f4b4c357a87e3558b197a: Status 404 returned error can't find the container with id 6082ef88a32c79dc7b8cab3275c2457e56538ada693f4b4c357a87e3558b197a Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277266 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-config-data-default\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277345 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4456ea0d-01da-4a0a-b918-db686f0e23aa-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277372 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4456ea0d-01da-4a0a-b918-db686f0e23aa-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277405 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277451 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277507 
5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-kolla-config\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277542 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qhkp\" (UniqueName: \"kubernetes.io/projected/4456ea0d-01da-4a0a-b918-db686f0e23aa-kube-api-access-4qhkp\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.277560 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4456ea0d-01da-4a0a-b918-db686f0e23aa-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.278075 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/4456ea0d-01da-4a0a-b918-db686f0e23aa-config-data-generated\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.278360 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-kolla-config\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.278423 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-config-data-default\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.279495 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4456ea0d-01da-4a0a-b918-db686f0e23aa-operator-scripts\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.285264 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/4456ea0d-01da-4a0a-b918-db686f0e23aa-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.289520 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4456ea0d-01da-4a0a-b918-db686f0e23aa-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.296777 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qhkp\" (UniqueName: \"kubernetes.io/projected/4456ea0d-01da-4a0a-b918-db686f0e23aa-kube-api-access-4qhkp\") pod \"openstack-galera-0\" 
(UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.360640 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.361900 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d97352cdd459bbe96ed8cc06d7e266b07f6eca91196dea336d73fd72f15e64d4/globalmount\"" pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.372540 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55c86457d7-w4257" event={"ID":"3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b","Type":"ContainerDied","Data":"27dd146d0f49814b07f4a69a28718615f6daa734a194c038d486ef1b4a14cc19"} Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.372589 5010 scope.go:117] "RemoveContainer" containerID="7eacaa3a5bed25fcc2888e0a66344060915953f674acf318cb29e861d926314c" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.372731 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55c86457d7-w4257" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.414656 5010 generic.go:334] "Generic (PLEG): container finished" podID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerID="983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56" exitCode=0 Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.415417 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" event={"ID":"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d","Type":"ContainerDied","Data":"983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56"} Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.421099 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"57796a15-e055-4685-bfe9-83da8320be25","Type":"ContainerStarted","Data":"6082ef88a32c79dc7b8cab3275c2457e56538ada693f4b4c357a87e3558b197a"} Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.437892 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e9eb49e1-ceca-4317-bd3d-8074787001e4","Type":"ContainerStarted","Data":"3a24a124186281252a2be13aa220f0af3e64b4afeb70f8d6a3c6a2a06fe68351"} Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.450345 5010 generic.go:334] "Generic (PLEG): container finished" podID="902a83c5-7077-4310-95e0-22892fc63caf" containerID="dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236" exitCode=0 Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.450924 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.451373 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" event={"ID":"902a83c5-7077-4310-95e0-22892fc63caf","Type":"ContainerDied","Data":"dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236"} Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.451419 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866449bdb9-99jjz" event={"ID":"902a83c5-7077-4310-95e0-22892fc63caf","Type":"ContainerDied","Data":"7d37cdaa5ef1d87681556af618da755a3068232b2e928bcb8432a0d7521aa5c8"} Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.451436 5010 scope.go:117] "RemoveContainer" containerID="dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.480079 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-w4257"] Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.497410 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55c86457d7-w4257"] Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.510189 5010 scope.go:117] "RemoveContainer" containerID="926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.557622 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-99jjz"] Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.569759 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-866449bdb9-99jjz"] Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.609862 5010 scope.go:117] "RemoveContainer" containerID="dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236" Nov 26 16:52:13 crc kubenswrapper[5010]: E1126 16:52:13.610269 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236\": container with ID starting with dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236 not found: ID does not exist" containerID="dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.610313 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236"} err="failed to get container status \"dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236\": rpc error: code = NotFound desc = could not find container \"dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236\": container with ID starting with dfce946ad51d43e56668750996d79b4276c53680412c44ce2039cdec2a451236 not found: ID does not exist" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.610342 5010 scope.go:117] "RemoveContainer" containerID="926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7" Nov 26 16:52:13 crc kubenswrapper[5010]: E1126 16:52:13.610641 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7\": container with ID starting with 926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7 not found: ID does not exist" 
containerID="926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.610661 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7"} err="failed to get container status \"926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7\": rpc error: code = NotFound desc = could not find container \"926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7\": container with ID starting with 926b507e01ecdc8c2b8c0d9707b2e24a9cd0f3e124846cbfb1a6cf5ff5b178e7 not found: ID does not exist" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.620379 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-dcf9c504-4a48-412c-bc2f-15bcdd3837bc\") pod \"openstack-galera-0\" (UID: \"4456ea0d-01da-4a0a-b918-db686f0e23aa\") " pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.666617 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.900794 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b" path="/var/lib/kubelet/pods/3a2c86b5-bf9c-493d-8f93-99d2e2e94e4b/volumes" Nov 26 16:52:13 crc kubenswrapper[5010]: I1126 16:52:13.901604 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="902a83c5-7077-4310-95e0-22892fc63caf" path="/var/lib/kubelet/pods/902a83c5-7077-4310-95e0-22892fc63caf/volumes" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.179579 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 26 16:52:14 crc kubenswrapper[5010]: W1126 16:52:14.195449 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4456ea0d_01da_4a0a_b918_db686f0e23aa.slice/crio-773ee9634b4739288ed17b51c88c0b87627d881cedec8b60453be1b538cc41d4 WatchSource:0}: Error finding container 773ee9634b4739288ed17b51c88c0b87627d881cedec8b60453be1b538cc41d4: Status 404 returned error can't find the container with id 773ee9634b4739288ed17b51c88c0b87627d881cedec8b60453be1b538cc41d4 Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.460534 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" event={"ID":"6b876b1b-eec1-485c-a227-5fa05c22add7","Type":"ContainerStarted","Data":"d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a"} Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.460976 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.463401 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4456ea0d-01da-4a0a-b918-db686f0e23aa","Type":"ContainerStarted","Data":"3e1c3bdf1adfa983f802cda03f48eeb541e1cc8a5ef0c59064bb533617b959d6"} Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.463441 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4456ea0d-01da-4a0a-b918-db686f0e23aa","Type":"ContainerStarted","Data":"773ee9634b4739288ed17b51c88c0b87627d881cedec8b60453be1b538cc41d4"} Nov 26 
16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.466182 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" event={"ID":"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d","Type":"ContainerStarted","Data":"7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691"} Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.466326 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.468279 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"57796a15-e055-4685-bfe9-83da8320be25","Type":"ContainerStarted","Data":"b95a295f5f5e8676f0f589a7c4f39c6be62a9bff2f164d4fcccd10096ed82996"} Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.471494 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e9eb49e1-ceca-4317-bd3d-8074787001e4","Type":"ContainerStarted","Data":"6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1"} Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.485282 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" podStartSLOduration=4.485257333 podStartE2EDuration="4.485257333s" podCreationTimestamp="2025-11-26 16:52:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:14.476749371 +0000 UTC m=+5155.267466539" watchObservedRunningTime="2025-11-26 16:52:14.485257333 +0000 UTC m=+5155.275974481" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.514338 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.515511 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.524746 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.527397 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.527420 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.527532 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-lbk5p" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.540040 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.586104 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" podStartSLOduration=3.586085942 podStartE2EDuration="3.586085942s" podCreationTimestamp="2025-11-26 16:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:14.581397125 +0000 UTC m=+5155.372114273" watchObservedRunningTime="2025-11-26 16:52:14.586085942 +0000 UTC m=+5155.376803090" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603100 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603150 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603200 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/536595b1-5ba9-4588-8e64-32480adb79ea-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603243 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/536595b1-5ba9-4588-8e64-32480adb79ea-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603370 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " 
pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603407 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603445 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fzs7g\" (UniqueName: \"kubernetes.io/projected/536595b1-5ba9-4588-8e64-32480adb79ea-kube-api-access-fzs7g\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.603474 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/536595b1-5ba9-4588-8e64-32480adb79ea-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.704737 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.704805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/536595b1-5ba9-4588-8e64-32480adb79ea-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.704846 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/536595b1-5ba9-4588-8e64-32480adb79ea-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.704918 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.704946 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.704988 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fzs7g\" (UniqueName: \"kubernetes.io/projected/536595b1-5ba9-4588-8e64-32480adb79ea-kube-api-access-fzs7g\") pod \"openstack-cell1-galera-0\" (UID: 
\"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.705015 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/536595b1-5ba9-4588-8e64-32480adb79ea-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.705070 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.705363 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/536595b1-5ba9-4588-8e64-32480adb79ea-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.705480 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.706080 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.706931 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/536595b1-5ba9-4588-8e64-32480adb79ea-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.707392 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.707425 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/4faa30d08c5fa684ac4f20fe5587ce0f485de321f65a3ebf8745bcb0c4570724/globalmount\"" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.710099 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/536595b1-5ba9-4588-8e64-32480adb79ea-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.711226 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/536595b1-5ba9-4588-8e64-32480adb79ea-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.721738 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fzs7g\" (UniqueName: \"kubernetes.io/projected/536595b1-5ba9-4588-8e64-32480adb79ea-kube-api-access-fzs7g\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.733621 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-53474d95-d3b9-41f5-b15e-7f4cb32ff523\") pod \"openstack-cell1-galera-0\" (UID: \"536595b1-5ba9-4588-8e64-32480adb79ea\") " pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.831114 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.831349 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.832774 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.834432 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-l8lnq" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.835474 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.838212 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.848547 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.906790 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72e344f5-2a4d-47df-9ae0-59758d16ba41-kolla-config\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.906890 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72e344f5-2a4d-47df-9ae0-59758d16ba41-config-data\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.906929 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtfjj\" (UniqueName: \"kubernetes.io/projected/72e344f5-2a4d-47df-9ae0-59758d16ba41-kube-api-access-jtfjj\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.906954 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72e344f5-2a4d-47df-9ae0-59758d16ba41-memcached-tls-certs\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:14 crc kubenswrapper[5010]: I1126 16:52:14.906974 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72e344f5-2a4d-47df-9ae0-59758d16ba41-combined-ca-bundle\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.009028 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72e344f5-2a4d-47df-9ae0-59758d16ba41-kolla-config\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.009410 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72e344f5-2a4d-47df-9ae0-59758d16ba41-config-data\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.009449 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtfjj\" (UniqueName: 
\"kubernetes.io/projected/72e344f5-2a4d-47df-9ae0-59758d16ba41-kube-api-access-jtfjj\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.009482 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72e344f5-2a4d-47df-9ae0-59758d16ba41-memcached-tls-certs\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.009512 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72e344f5-2a4d-47df-9ae0-59758d16ba41-combined-ca-bundle\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.009844 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/72e344f5-2a4d-47df-9ae0-59758d16ba41-kolla-config\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.010342 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/72e344f5-2a4d-47df-9ae0-59758d16ba41-config-data\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.014427 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/72e344f5-2a4d-47df-9ae0-59758d16ba41-memcached-tls-certs\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.015581 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/72e344f5-2a4d-47df-9ae0-59758d16ba41-combined-ca-bundle\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.038220 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtfjj\" (UniqueName: \"kubernetes.io/projected/72e344f5-2a4d-47df-9ae0-59758d16ba41-kube-api-access-jtfjj\") pod \"memcached-0\" (UID: \"72e344f5-2a4d-47df-9ae0-59758d16ba41\") " pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.209524 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.342971 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 26 16:52:15 crc kubenswrapper[5010]: W1126 16:52:15.348602 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod536595b1_5ba9_4588_8e64_32480adb79ea.slice/crio-11fb0472665e4a68862229f6b11a0676aa7c097a1c54c046cfe5f15ba713ae8b WatchSource:0}: Error finding container 11fb0472665e4a68862229f6b11a0676aa7c097a1c54c046cfe5f15ba713ae8b: Status 404 returned error can't find the container with id 11fb0472665e4a68862229f6b11a0676aa7c097a1c54c046cfe5f15ba713ae8b Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.488588 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"536595b1-5ba9-4588-8e64-32480adb79ea","Type":"ContainerStarted","Data":"11fb0472665e4a68862229f6b11a0676aa7c097a1c54c046cfe5f15ba713ae8b"} Nov 26 16:52:15 crc kubenswrapper[5010]: I1126 16:52:15.736806 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 26 16:52:16 crc kubenswrapper[5010]: I1126 16:52:16.499745 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"536595b1-5ba9-4588-8e64-32480adb79ea","Type":"ContainerStarted","Data":"4b8ae695b970d0fdf8e868f0ad56c1572d494da5f20103c9682b216d763d48f9"} Nov 26 16:52:16 crc kubenswrapper[5010]: I1126 16:52:16.501371 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"72e344f5-2a4d-47df-9ae0-59758d16ba41","Type":"ContainerStarted","Data":"cda9fe957e3d3f2300e171d402ad36ede91f6b1f7daef6cbdc956625ae4f54de"} Nov 26 16:52:16 crc kubenswrapper[5010]: I1126 16:52:16.501498 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"72e344f5-2a4d-47df-9ae0-59758d16ba41","Type":"ContainerStarted","Data":"0b8a4b61873e3800817f1d468453d9db75746cb2ebd254b52be3accae1577e4b"} Nov 26 16:52:16 crc kubenswrapper[5010]: I1126 16:52:16.501624 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 26 16:52:16 crc kubenswrapper[5010]: I1126 16:52:16.547275 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.547251605 podStartE2EDuration="2.547251605s" podCreationTimestamp="2025-11-26 16:52:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:16.54622041 +0000 UTC m=+5157.336937568" watchObservedRunningTime="2025-11-26 16:52:16.547251605 +0000 UTC m=+5157.337968773" Nov 26 16:52:16 crc kubenswrapper[5010]: I1126 16:52:16.891750 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:52:16 crc kubenswrapper[5010]: E1126 16:52:16.891980 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:52:18 crc kubenswrapper[5010]: I1126 
16:52:18.521556 5010 generic.go:334] "Generic (PLEG): container finished" podID="4456ea0d-01da-4a0a-b918-db686f0e23aa" containerID="3e1c3bdf1adfa983f802cda03f48eeb541e1cc8a5ef0c59064bb533617b959d6" exitCode=0 Nov 26 16:52:18 crc kubenswrapper[5010]: I1126 16:52:18.521812 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4456ea0d-01da-4a0a-b918-db686f0e23aa","Type":"ContainerDied","Data":"3e1c3bdf1adfa983f802cda03f48eeb541e1cc8a5ef0c59064bb533617b959d6"} Nov 26 16:52:19 crc kubenswrapper[5010]: I1126 16:52:19.535895 5010 generic.go:334] "Generic (PLEG): container finished" podID="536595b1-5ba9-4588-8e64-32480adb79ea" containerID="4b8ae695b970d0fdf8e868f0ad56c1572d494da5f20103c9682b216d763d48f9" exitCode=0 Nov 26 16:52:19 crc kubenswrapper[5010]: I1126 16:52:19.536005 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"536595b1-5ba9-4588-8e64-32480adb79ea","Type":"ContainerDied","Data":"4b8ae695b970d0fdf8e868f0ad56c1572d494da5f20103c9682b216d763d48f9"} Nov 26 16:52:19 crc kubenswrapper[5010]: I1126 16:52:19.541221 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"4456ea0d-01da-4a0a-b918-db686f0e23aa","Type":"ContainerStarted","Data":"aaa312b9dda1b2b793550f15e13043a5e16759048c1cc2fd9e843e71dd20da9f"} Nov 26 16:52:19 crc kubenswrapper[5010]: I1126 16:52:19.607490 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=8.60746956 podStartE2EDuration="8.60746956s" podCreationTimestamp="2025-11-26 16:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:19.604773863 +0000 UTC m=+5160.395491101" watchObservedRunningTime="2025-11-26 16:52:19.60746956 +0000 UTC m=+5160.398186718" Nov 26 16:52:20 crc kubenswrapper[5010]: I1126 16:52:20.211918 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 26 16:52:20 crc kubenswrapper[5010]: I1126 16:52:20.552289 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"536595b1-5ba9-4588-8e64-32480adb79ea","Type":"ContainerStarted","Data":"95ed12bfbdeb00f165baeb81a00fad9026663f9d806923be5910298d07a5a180"} Nov 26 16:52:20 crc kubenswrapper[5010]: I1126 16:52:20.583798 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=7.583762574 podStartE2EDuration="7.583762574s" podCreationTimestamp="2025-11-26 16:52:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:20.570898834 +0000 UTC m=+5161.361615982" watchObservedRunningTime="2025-11-26 16:52:20.583762574 +0000 UTC m=+5161.374479802" Nov 26 16:52:21 crc kubenswrapper[5010]: I1126 16:52:21.263930 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:21 crc kubenswrapper[5010]: I1126 16:52:21.670819 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:52:21 crc kubenswrapper[5010]: I1126 16:52:21.730345 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-l2nwx"] Nov 26 16:52:21 crc kubenswrapper[5010]: I1126 
16:52:21.730780 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerName="dnsmasq-dns" containerID="cri-o://d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a" gracePeriod=10 Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.165899 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.225226 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-config\") pod \"6b876b1b-eec1-485c-a227-5fa05c22add7\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.225333 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-78rbl\" (UniqueName: \"kubernetes.io/projected/6b876b1b-eec1-485c-a227-5fa05c22add7-kube-api-access-78rbl\") pod \"6b876b1b-eec1-485c-a227-5fa05c22add7\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.225515 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-dns-svc\") pod \"6b876b1b-eec1-485c-a227-5fa05c22add7\" (UID: \"6b876b1b-eec1-485c-a227-5fa05c22add7\") " Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.232320 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b876b1b-eec1-485c-a227-5fa05c22add7-kube-api-access-78rbl" (OuterVolumeSpecName: "kube-api-access-78rbl") pod "6b876b1b-eec1-485c-a227-5fa05c22add7" (UID: "6b876b1b-eec1-485c-a227-5fa05c22add7"). InnerVolumeSpecName "kube-api-access-78rbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.260220 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-config" (OuterVolumeSpecName: "config") pod "6b876b1b-eec1-485c-a227-5fa05c22add7" (UID: "6b876b1b-eec1-485c-a227-5fa05c22add7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.269291 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6b876b1b-eec1-485c-a227-5fa05c22add7" (UID: "6b876b1b-eec1-485c-a227-5fa05c22add7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.327316 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.327356 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b876b1b-eec1-485c-a227-5fa05c22add7-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.327370 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-78rbl\" (UniqueName: \"kubernetes.io/projected/6b876b1b-eec1-485c-a227-5fa05c22add7-kube-api-access-78rbl\") on node \"crc\" DevicePath \"\"" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.569879 5010 generic.go:334] "Generic (PLEG): container finished" podID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerID="d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a" exitCode=0 Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.569959 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" event={"ID":"6b876b1b-eec1-485c-a227-5fa05c22add7","Type":"ContainerDied","Data":"d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a"} Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.570053 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.570076 5010 scope.go:117] "RemoveContainer" containerID="d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.570045 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f4c6c447c-l2nwx" event={"ID":"6b876b1b-eec1-485c-a227-5fa05c22add7","Type":"ContainerDied","Data":"40ac590e501f763410cc788892df93833f3b9eba916ab4221180135f23d2ff16"} Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.600503 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-l2nwx"] Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.605537 5010 scope.go:117] "RemoveContainer" containerID="92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.610187 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f4c6c447c-l2nwx"] Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.634109 5010 scope.go:117] "RemoveContainer" containerID="d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a" Nov 26 16:52:22 crc kubenswrapper[5010]: E1126 16:52:22.634457 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a\": container with ID starting with d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a not found: ID does not exist" containerID="d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.634611 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a"} err="failed to get container status 
\"d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a\": rpc error: code = NotFound desc = could not find container \"d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a\": container with ID starting with d6bcd38d53c96236b70a357cb01ec0d5be5b747f93b82a1c6a41658e0d3b0d0a not found: ID does not exist" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.634790 5010 scope.go:117] "RemoveContainer" containerID="92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc" Nov 26 16:52:22 crc kubenswrapper[5010]: E1126 16:52:22.635227 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc\": container with ID starting with 92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc not found: ID does not exist" containerID="92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc" Nov 26 16:52:22 crc kubenswrapper[5010]: I1126 16:52:22.635336 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc"} err="failed to get container status \"92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc\": rpc error: code = NotFound desc = could not find container \"92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc\": container with ID starting with 92d42ddb789db4dcdc08986a19ac68ec8a17bfe200a80dd67f34b9b1bcb133cc not found: ID does not exist" Nov 26 16:52:23 crc kubenswrapper[5010]: I1126 16:52:23.667864 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 26 16:52:23 crc kubenswrapper[5010]: I1126 16:52:23.668255 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 26 16:52:23 crc kubenswrapper[5010]: I1126 16:52:23.779571 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 26 16:52:23 crc kubenswrapper[5010]: I1126 16:52:23.905967 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" path="/var/lib/kubelet/pods/6b876b1b-eec1-485c-a227-5fa05c22add7/volumes" Nov 26 16:52:24 crc kubenswrapper[5010]: I1126 16:52:24.775484 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 26 16:52:24 crc kubenswrapper[5010]: I1126 16:52:24.860934 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:24 crc kubenswrapper[5010]: I1126 16:52:24.861212 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:27 crc kubenswrapper[5010]: I1126 16:52:27.056348 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:27 crc kubenswrapper[5010]: I1126 16:52:27.161065 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 26 16:52:31 crc kubenswrapper[5010]: I1126 16:52:31.891762 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:52:31 crc kubenswrapper[5010]: E1126 16:52:31.892327 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:52:46 crc kubenswrapper[5010]: I1126 16:52:46.810669 5010 generic.go:334] "Generic (PLEG): container finished" podID="57796a15-e055-4685-bfe9-83da8320be25" containerID="b95a295f5f5e8676f0f589a7c4f39c6be62a9bff2f164d4fcccd10096ed82996" exitCode=0 Nov 26 16:52:46 crc kubenswrapper[5010]: I1126 16:52:46.811478 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"57796a15-e055-4685-bfe9-83da8320be25","Type":"ContainerDied","Data":"b95a295f5f5e8676f0f589a7c4f39c6be62a9bff2f164d4fcccd10096ed82996"} Nov 26 16:52:46 crc kubenswrapper[5010]: I1126 16:52:46.816124 5010 generic.go:334] "Generic (PLEG): container finished" podID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerID="6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1" exitCode=0 Nov 26 16:52:46 crc kubenswrapper[5010]: I1126 16:52:46.816171 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e9eb49e1-ceca-4317-bd3d-8074787001e4","Type":"ContainerDied","Data":"6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1"} Nov 26 16:52:46 crc kubenswrapper[5010]: I1126 16:52:46.891660 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:52:46 crc kubenswrapper[5010]: E1126 16:52:46.892253 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:52:47 crc kubenswrapper[5010]: I1126 16:52:47.827617 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"57796a15-e055-4685-bfe9-83da8320be25","Type":"ContainerStarted","Data":"d9532fe0c6fe4b10bde6ad4c15e3902863370d3148955c18392d17fa2ed84a76"} Nov 26 16:52:47 crc kubenswrapper[5010]: I1126 16:52:47.828106 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 16:52:47 crc kubenswrapper[5010]: I1126 16:52:47.830051 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e9eb49e1-ceca-4317-bd3d-8074787001e4","Type":"ContainerStarted","Data":"000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da"} Nov 26 16:52:47 crc kubenswrapper[5010]: I1126 16:52:47.830357 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:52:47 crc kubenswrapper[5010]: I1126 16:52:47.852829 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.852810397 podStartE2EDuration="36.852810397s" podCreationTimestamp="2025-11-26 16:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:47.849977416 +0000 UTC 
m=+5188.640694584" watchObservedRunningTime="2025-11-26 16:52:47.852810397 +0000 UTC m=+5188.643527545" Nov 26 16:52:47 crc kubenswrapper[5010]: I1126 16:52:47.882241 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.882214578 podStartE2EDuration="36.882214578s" podCreationTimestamp="2025-11-26 16:52:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:52:47.875986413 +0000 UTC m=+5188.666703571" watchObservedRunningTime="2025-11-26 16:52:47.882214578 +0000 UTC m=+5188.672931746" Nov 26 16:52:59 crc kubenswrapper[5010]: I1126 16:52:59.896353 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:52:59 crc kubenswrapper[5010]: E1126 16:52:59.897560 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:53:02 crc kubenswrapper[5010]: I1126 16:53:02.379544 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:02 crc kubenswrapper[5010]: I1126 16:53:02.800928 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 16:53:05 crc kubenswrapper[5010]: E1126 16:53:05.595656 5010 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.154:46038->38.102.83.154:42721: write tcp 38.102.83.154:46038->38.102.83.154:42721: write: broken pipe Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.820845 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54564445dc-clm2g"] Nov 26 16:53:06 crc kubenswrapper[5010]: E1126 16:53:06.821327 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerName="dnsmasq-dns" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.821346 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerName="dnsmasq-dns" Nov 26 16:53:06 crc kubenswrapper[5010]: E1126 16:53:06.821379 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerName="init" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.821389 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerName="init" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.821676 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b876b1b-eec1-485c-a227-5fa05c22add7" containerName="dnsmasq-dns" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.822919 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.848611 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-clm2g"] Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.903431 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-config\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.903550 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d72pw\" (UniqueName: \"kubernetes.io/projected/e54363d2-9825-4825-84de-ed7e85d4c162-kube-api-access-d72pw\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:06 crc kubenswrapper[5010]: I1126 16:53:06.903668 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-dns-svc\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.005659 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d72pw\" (UniqueName: \"kubernetes.io/projected/e54363d2-9825-4825-84de-ed7e85d4c162-kube-api-access-d72pw\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.005856 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-dns-svc\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.006006 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-config\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.007094 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-config\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.007356 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-dns-svc\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.037986 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d72pw\" (UniqueName: 
\"kubernetes.io/projected/e54363d2-9825-4825-84de-ed7e85d4c162-kube-api-access-d72pw\") pod \"dnsmasq-dns-54564445dc-clm2g\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.153906 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.399493 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-clm2g"] Nov 26 16:53:07 crc kubenswrapper[5010]: I1126 16:53:07.571445 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:53:08 crc kubenswrapper[5010]: I1126 16:53:08.060603 5010 generic.go:334] "Generic (PLEG): container finished" podID="e54363d2-9825-4825-84de-ed7e85d4c162" containerID="5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e" exitCode=0 Nov 26 16:53:08 crc kubenswrapper[5010]: I1126 16:53:08.060688 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-clm2g" event={"ID":"e54363d2-9825-4825-84de-ed7e85d4c162","Type":"ContainerDied","Data":"5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e"} Nov 26 16:53:08 crc kubenswrapper[5010]: I1126 16:53:08.060904 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-clm2g" event={"ID":"e54363d2-9825-4825-84de-ed7e85d4c162","Type":"ContainerStarted","Data":"de14b1671747550617891e0e7905054a63e955054aa422c1ac35a24de2c64cee"} Nov 26 16:53:08 crc kubenswrapper[5010]: I1126 16:53:08.243813 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:53:09 crc kubenswrapper[5010]: I1126 16:53:09.069227 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-clm2g" event={"ID":"e54363d2-9825-4825-84de-ed7e85d4c162","Type":"ContainerStarted","Data":"ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780"} Nov 26 16:53:09 crc kubenswrapper[5010]: I1126 16:53:09.069404 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:09 crc kubenswrapper[5010]: I1126 16:53:09.089017 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54564445dc-clm2g" podStartSLOduration=3.088995917 podStartE2EDuration="3.088995917s" podCreationTimestamp="2025-11-26 16:53:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:53:09.08751729 +0000 UTC m=+5209.878234448" watchObservedRunningTime="2025-11-26 16:53:09.088995917 +0000 UTC m=+5209.879713065" Nov 26 16:53:11 crc kubenswrapper[5010]: I1126 16:53:11.774451 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="rabbitmq" containerID="cri-o://d9532fe0c6fe4b10bde6ad4c15e3902863370d3148955c18392d17fa2ed84a76" gracePeriod=604796 Nov 26 16:53:11 crc kubenswrapper[5010]: I1126 16:53:11.892062 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:53:11 crc kubenswrapper[5010]: E1126 16:53:11.892402 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:53:12 crc kubenswrapper[5010]: I1126 16:53:12.241947 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="rabbitmq" containerID="cri-o://000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da" gracePeriod=604797 Nov 26 16:53:12 crc kubenswrapper[5010]: I1126 16:53:12.375634 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.1:5671: connect: connection refused" Nov 26 16:53:12 crc kubenswrapper[5010]: I1126 16:53:12.797868 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.1.2:5671: connect: connection refused" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.156025 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.253511 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-964dp"] Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.254090 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerName="dnsmasq-dns" containerID="cri-o://7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691" gracePeriod=10 Nov 26 16:53:17 crc kubenswrapper[5010]: E1126 16:53:17.415251 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67e2ac4e_3adc_4d5b_bd12_20f6c705bf1d.slice/crio-7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod67e2ac4e_3adc_4d5b_bd12_20f6c705bf1d.slice/crio-conmon-7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.717967 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.896313 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d9s8p\" (UniqueName: \"kubernetes.io/projected/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-kube-api-access-d9s8p\") pod \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.896403 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-config\") pod \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.896464 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-dns-svc\") pod \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\" (UID: \"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d\") " Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.907858 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-kube-api-access-d9s8p" (OuterVolumeSpecName: "kube-api-access-d9s8p") pod "67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" (UID: "67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d"). InnerVolumeSpecName "kube-api-access-d9s8p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.967280 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" (UID: "67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.978979 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-config" (OuterVolumeSpecName: "config") pod "67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" (UID: "67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.999292 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.999384 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d9s8p\" (UniqueName: \"kubernetes.io/projected/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-kube-api-access-d9s8p\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:17 crc kubenswrapper[5010]: I1126 16:53:17.999458 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.172485 5010 generic.go:334] "Generic (PLEG): container finished" podID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerID="7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691" exitCode=0 Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.172589 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.172579 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" event={"ID":"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d","Type":"ContainerDied","Data":"7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691"} Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.172647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59c6c64b5c-964dp" event={"ID":"67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d","Type":"ContainerDied","Data":"40a589fe8a1c2928856bf675fcfde86280230348dece1fdd7644ea34a9a97e36"} Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.172668 5010 scope.go:117] "RemoveContainer" containerID="7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.176321 5010 generic.go:334] "Generic (PLEG): container finished" podID="57796a15-e055-4685-bfe9-83da8320be25" containerID="d9532fe0c6fe4b10bde6ad4c15e3902863370d3148955c18392d17fa2ed84a76" exitCode=0 Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.176355 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"57796a15-e055-4685-bfe9-83da8320be25","Type":"ContainerDied","Data":"d9532fe0c6fe4b10bde6ad4c15e3902863370d3148955c18392d17fa2ed84a76"} Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.208515 5010 scope.go:117] "RemoveContainer" containerID="983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.227611 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-964dp"] Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.230768 5010 scope.go:117] "RemoveContainer" containerID="7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691" Nov 26 16:53:18 crc kubenswrapper[5010]: E1126 16:53:18.234667 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691\": container with ID starting with 7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691 not found: ID does not 
exist" containerID="7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.234730 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691"} err="failed to get container status \"7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691\": rpc error: code = NotFound desc = could not find container \"7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691\": container with ID starting with 7a3d4461163f15b358c5917ba4455ae24fb8c22a9ab3fa85944f282f8858e691 not found: ID does not exist" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.234768 5010 scope.go:117] "RemoveContainer" containerID="983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.235827 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59c6c64b5c-964dp"] Nov 26 16:53:18 crc kubenswrapper[5010]: E1126 16:53:18.236477 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56\": container with ID starting with 983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56 not found: ID does not exist" containerID="983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.236509 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56"} err="failed to get container status \"983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56\": rpc error: code = NotFound desc = could not find container \"983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56\": container with ID starting with 983323c54e9e203ed2bc88fea83e05e6757dec7815ecd9bbf0f2ac342a422a56 not found: ID does not exist" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.346471 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.508905 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-plugins-conf\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.509493 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/57796a15-e055-4685-bfe9-83da8320be25-pod-info\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.509546 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-plugins\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.509617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-erlang-cookie\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.509652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-config-data\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.509674 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-confd\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.509790 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdz45\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-kube-api-access-jdz45\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.510097 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.510162 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/57796a15-e055-4685-bfe9-83da8320be25-erlang-cookie-secret\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.510202 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-tls\") pod 
\"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.510233 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-server-conf\") pod \"57796a15-e055-4685-bfe9-83da8320be25\" (UID: \"57796a15-e055-4685-bfe9-83da8320be25\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.510575 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.511438 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.511539 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.511557 5010 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.511803 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.513934 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.517487 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-kube-api-access-jdz45" (OuterVolumeSpecName: "kube-api-access-jdz45") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "kube-api-access-jdz45". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.517580 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/57796a15-e055-4685-bfe9-83da8320be25-pod-info" (OuterVolumeSpecName: "pod-info") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.524351 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57796a15-e055-4685-bfe9-83da8320be25-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.540530 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-config-data" (OuterVolumeSpecName: "config-data") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.545404 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2" (OuterVolumeSpecName: "persistence") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.559422 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-server-conf" (OuterVolumeSpecName: "server-conf") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.600164 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "57796a15-e055-4685-bfe9-83da8320be25" (UID: "57796a15-e055-4685-bfe9-83da8320be25"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613427 5010 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/57796a15-e055-4685-bfe9-83da8320be25-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613588 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613645 5010 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613697 5010 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/57796a15-e055-4685-bfe9-83da8320be25-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613781 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613853 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/57796a15-e055-4685-bfe9-83da8320be25-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.613946 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.614049 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdz45\" (UniqueName: \"kubernetes.io/projected/57796a15-e055-4685-bfe9-83da8320be25-kube-api-access-jdz45\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.614160 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") on node \"crc\" " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.636521 5010 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.636730 5010 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2") on node "crc" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.715572 5010 reconciler_common.go:293] "Volume detached for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.772785 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918512 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-server-conf\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918564 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-tls\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918619 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z29jx\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-kube-api-access-z29jx\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918689 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-erlang-cookie\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918742 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e9eb49e1-ceca-4317-bd3d-8074787001e4-erlang-cookie-secret\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918791 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-confd\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918857 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-plugins\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918881 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-config-data\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.918911 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e9eb49e1-ceca-4317-bd3d-8074787001e4-pod-info\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.919037 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod 
\"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.919077 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-plugins-conf\") pod \"e9eb49e1-ceca-4317-bd3d-8074787001e4\" (UID: \"e9eb49e1-ceca-4317-bd3d-8074787001e4\") " Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.920075 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.920576 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.921067 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.924699 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/e9eb49e1-ceca-4317-bd3d-8074787001e4-pod-info" (OuterVolumeSpecName: "pod-info") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.925629 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.926803 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-kube-api-access-z29jx" (OuterVolumeSpecName: "kube-api-access-z29jx") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "kube-api-access-z29jx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.932896 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9eb49e1-ceca-4317-bd3d-8074787001e4-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "erlang-cookie-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.938823 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0" (OuterVolumeSpecName: "persistence") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.954169 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-config-data" (OuterVolumeSpecName: "config-data") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:18 crc kubenswrapper[5010]: I1126 16:53:18.971174 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-server-conf" (OuterVolumeSpecName: "server-conf") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.021212 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "e9eb49e1-ceca-4317-bd3d-8074787001e4" (UID: "e9eb49e1-ceca-4317-bd3d-8074787001e4"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.021631 5010 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-server-conf\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.021659 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.021672 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z29jx\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-kube-api-access-z29jx\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022850 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022865 5010 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/e9eb49e1-ceca-4317-bd3d-8074787001e4-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022877 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022893 5010 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/e9eb49e1-ceca-4317-bd3d-8074787001e4-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022904 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022914 5010 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/e9eb49e1-ceca-4317-bd3d-8074787001e4-pod-info\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.022947 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") on node \"crc\" " Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.023008 5010 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/e9eb49e1-ceca-4317-bd3d-8074787001e4-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.046023 5010 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... 
Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.046209 5010 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0") on node "crc" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.124709 5010 reconciler_common.go:293] "Volume detached for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") on node \"crc\" DevicePath \"\"" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.189641 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"57796a15-e055-4685-bfe9-83da8320be25","Type":"ContainerDied","Data":"6082ef88a32c79dc7b8cab3275c2457e56538ada693f4b4c357a87e3558b197a"} Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.189703 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.189855 5010 scope.go:117] "RemoveContainer" containerID="d9532fe0c6fe4b10bde6ad4c15e3902863370d3148955c18392d17fa2ed84a76" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.192186 5010 generic.go:334] "Generic (PLEG): container finished" podID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerID="000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da" exitCode=0 Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.192380 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e9eb49e1-ceca-4317-bd3d-8074787001e4","Type":"ContainerDied","Data":"000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da"} Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.192427 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"e9eb49e1-ceca-4317-bd3d-8074787001e4","Type":"ContainerDied","Data":"3a24a124186281252a2be13aa220f0af3e64b4afeb70f8d6a3c6a2a06fe68351"} Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.192536 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.222000 5010 scope.go:117] "RemoveContainer" containerID="b95a295f5f5e8676f0f589a7c4f39c6be62a9bff2f164d4fcccd10096ed82996" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.258410 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.271316 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.272267 5010 scope.go:117] "RemoveContainer" containerID="000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.284885 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.293889 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.300781 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.301201 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerName="init" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301256 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerName="init" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.301280 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="rabbitmq" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301289 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="rabbitmq" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.301310 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="rabbitmq" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301318 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="rabbitmq" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.301334 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="setup-container" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301342 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="setup-container" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.301365 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="setup-container" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301373 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="setup-container" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.301389 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerName="dnsmasq-dns" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301396 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerName="dnsmasq-dns" Nov 26 16:53:19 crc 
kubenswrapper[5010]: I1126 16:53:19.301604 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" containerName="dnsmasq-dns" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301620 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" containerName="rabbitmq" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.301639 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="57796a15-e055-4685-bfe9-83da8320be25" containerName="rabbitmq" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.302643 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.305793 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.306223 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.306700 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.307177 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.311691 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-k2czn" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.311887 5010 scope.go:117] "RemoveContainer" containerID="6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.312274 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.312483 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.322607 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.324214 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.328349 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.328620 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-2kpf2" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.328835 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.329029 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.329176 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.329323 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.333241 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.333476 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.342828 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.390402 5010 scope.go:117] "RemoveContainer" containerID="000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.390908 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da\": container with ID starting with 000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da not found: ID does not exist" containerID="000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.390931 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da"} err="failed to get container status \"000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da\": rpc error: code = NotFound desc = could not find container \"000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da\": container with ID starting with 000407092ce084318ded2a47070cdc579ee5335cdff59b0be4504ef5581bc8da not found: ID does not exist" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.390952 5010 scope.go:117] "RemoveContainer" containerID="6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1" Nov 26 16:53:19 crc kubenswrapper[5010]: E1126 16:53:19.391250 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1\": container with ID starting with 6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1 not found: ID does not exist" containerID="6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.391271 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1"} err="failed to get container status \"6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1\": rpc error: code = NotFound desc = could not find container \"6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1\": container with ID starting with 6ffa79975817f5ca4b2afa1f4eb5c07c3b11d3e3af8e753af198e92e03dbefe1 not found: ID does not exist" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.428661 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.428756 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.428846 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.428879 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.428918 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.428977 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429009 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429049 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: 
\"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429079 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/900e098f-8106-435a-964a-a4e3755308fc-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429155 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429178 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429203 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429272 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429317 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxrzp\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-kube-api-access-nxrzp\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429348 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429407 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429427 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" 
(UniqueName: \"kubernetes.io/downward-api/900e098f-8106-435a-964a-a4e3755308fc-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429507 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429578 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429600 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jljvb\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-kube-api-access-jljvb\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429629 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.429647 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531486 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531574 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531667 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/900e098f-8106-435a-964a-a4e3755308fc-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531776 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531844 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531901 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jljvb\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-kube-api-access-jljvb\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.531977 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532043 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532113 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532175 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532235 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532303 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532380 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532495 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532560 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532614 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532675 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/900e098f-8106-435a-964a-a4e3755308fc-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532797 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532860 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.532919 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533006 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533068 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxrzp\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-kube-api-access-nxrzp\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533576 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533737 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533833 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533874 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.534705 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-config-data\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.534814 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-server-conf\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.533017 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.536749 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/900e098f-8106-435a-964a-a4e3755308fc-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.537598 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.537623 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.537646 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/6ae0b79e21bdf677e9addbce9b06dc5ea6a1fabdcef32b98f2493c75fdcd03f4/globalmount\"" pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.537904 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.538354 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.538401 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.538408 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/2498bea4bb787ce3fa7dbd459d0da0f5571f9929b0ff125c01dcb0b60df1edb8/globalmount\"" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.539202 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.541739 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.543254 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-pod-info\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " 
pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.544257 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.544431 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/900e098f-8106-435a-964a-a4e3755308fc-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.549698 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.551628 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/900e098f-8106-435a-964a-a4e3755308fc-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.555570 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jljvb\" (UniqueName: \"kubernetes.io/projected/aa44ef1f-4c07-4afd-97c6-9e0075ad6f71-kube-api-access-jljvb\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.568391 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxrzp\" (UniqueName: \"kubernetes.io/projected/900e098f-8106-435a-964a-a4e3755308fc-kube-api-access-nxrzp\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.597697 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8cb1568-1d48-49bf-8290-7c7e4aba7cb2\") pod \"rabbitmq-server-0\" (UID: \"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71\") " pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.599304 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d0a6eec9-cf11-4e6e-9ad4-d092b7df65b0\") pod \"rabbitmq-cell1-server-0\" (UID: \"900e098f-8106-435a-964a-a4e3755308fc\") " pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.623242 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.646684 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.868797 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: W1126 16:53:19.882269 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa44ef1f_4c07_4afd_97c6_9e0075ad6f71.slice/crio-eff8ff99785e5ccee8bf089108f44dddaeed157a1686c73d45d5da1b11629535 WatchSource:0}: Error finding container eff8ff99785e5ccee8bf089108f44dddaeed157a1686c73d45d5da1b11629535: Status 404 returned error can't find the container with id eff8ff99785e5ccee8bf089108f44dddaeed157a1686c73d45d5da1b11629535 Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.930709 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57796a15-e055-4685-bfe9-83da8320be25" path="/var/lib/kubelet/pods/57796a15-e055-4685-bfe9-83da8320be25/volumes" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.931275 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d" path="/var/lib/kubelet/pods/67e2ac4e-3adc-4d5b-bd12-20f6c705bf1d/volumes" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.932501 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9eb49e1-ceca-4317-bd3d-8074787001e4" path="/var/lib/kubelet/pods/e9eb49e1-ceca-4317-bd3d-8074787001e4/volumes" Nov 26 16:53:19 crc kubenswrapper[5010]: I1126 16:53:19.933262 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 26 16:53:19 crc kubenswrapper[5010]: W1126 16:53:19.934744 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod900e098f_8106_435a_964a_a4e3755308fc.slice/crio-1c14b0493ac0e7392a7bb2c4cf23819d5377ec20a51533bc63fb46b65f3113df WatchSource:0}: Error finding container 1c14b0493ac0e7392a7bb2c4cf23819d5377ec20a51533bc63fb46b65f3113df: Status 404 returned error can't find the container with id 1c14b0493ac0e7392a7bb2c4cf23819d5377ec20a51533bc63fb46b65f3113df Nov 26 16:53:20 crc kubenswrapper[5010]: I1126 16:53:20.204097 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71","Type":"ContainerStarted","Data":"eff8ff99785e5ccee8bf089108f44dddaeed157a1686c73d45d5da1b11629535"} Nov 26 16:53:20 crc kubenswrapper[5010]: I1126 16:53:20.209868 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"900e098f-8106-435a-964a-a4e3755308fc","Type":"ContainerStarted","Data":"1c14b0493ac0e7392a7bb2c4cf23819d5377ec20a51533bc63fb46b65f3113df"} Nov 26 16:53:22 crc kubenswrapper[5010]: I1126 16:53:22.246184 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"900e098f-8106-435a-964a-a4e3755308fc","Type":"ContainerStarted","Data":"888be89b559c2fa9a1b897948480e688a2f5c8404473d837a00c132d4f97f050"} Nov 26 16:53:22 crc kubenswrapper[5010]: I1126 16:53:22.249471 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71","Type":"ContainerStarted","Data":"2748eaba7510e99e7c8e7e63e70d9cd5c64908b7fca968b70fd5255d2100a9bb"} Nov 26 16:53:25 crc kubenswrapper[5010]: I1126 16:53:25.892909 5010 scope.go:117] "RemoveContainer" 
containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:53:25 crc kubenswrapper[5010]: E1126 16:53:25.894648 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:53:36 crc kubenswrapper[5010]: I1126 16:53:36.891630 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:53:36 crc kubenswrapper[5010]: E1126 16:53:36.892679 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:53:51 crc kubenswrapper[5010]: I1126 16:53:51.892274 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:53:51 crc kubenswrapper[5010]: E1126 16:53:51.893327 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:53:54 crc kubenswrapper[5010]: I1126 16:53:54.595264 5010 generic.go:334] "Generic (PLEG): container finished" podID="aa44ef1f-4c07-4afd-97c6-9e0075ad6f71" containerID="2748eaba7510e99e7c8e7e63e70d9cd5c64908b7fca968b70fd5255d2100a9bb" exitCode=0 Nov 26 16:53:54 crc kubenswrapper[5010]: I1126 16:53:54.595403 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71","Type":"ContainerDied","Data":"2748eaba7510e99e7c8e7e63e70d9cd5c64908b7fca968b70fd5255d2100a9bb"} Nov 26 16:53:55 crc kubenswrapper[5010]: I1126 16:53:55.608396 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"aa44ef1f-4c07-4afd-97c6-9e0075ad6f71","Type":"ContainerStarted","Data":"b9153baea643d11c2e6a9a939bd053d4a3495db26b692792cf3e4c7b19fb1944"} Nov 26 16:53:55 crc kubenswrapper[5010]: I1126 16:53:55.609165 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 26 16:53:55 crc kubenswrapper[5010]: I1126 16:53:55.611262 5010 generic.go:334] "Generic (PLEG): container finished" podID="900e098f-8106-435a-964a-a4e3755308fc" containerID="888be89b559c2fa9a1b897948480e688a2f5c8404473d837a00c132d4f97f050" exitCode=0 Nov 26 16:53:55 crc kubenswrapper[5010]: I1126 16:53:55.611311 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"900e098f-8106-435a-964a-a4e3755308fc","Type":"ContainerDied","Data":"888be89b559c2fa9a1b897948480e688a2f5c8404473d837a00c132d4f97f050"} Nov 26 16:53:55 crc kubenswrapper[5010]: I1126 
16:53:55.654447 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.654418662 podStartE2EDuration="36.654418662s" podCreationTimestamp="2025-11-26 16:53:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:53:55.645164462 +0000 UTC m=+5256.435881700" watchObservedRunningTime="2025-11-26 16:53:55.654418662 +0000 UTC m=+5256.445135840" Nov 26 16:53:56 crc kubenswrapper[5010]: I1126 16:53:56.622033 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"900e098f-8106-435a-964a-a4e3755308fc","Type":"ContainerStarted","Data":"a4017164c4f4e0a5ba0378e779e006f02c90c91429d24419f6261d254759fa58"} Nov 26 16:53:56 crc kubenswrapper[5010]: I1126 16:53:56.622862 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:53:56 crc kubenswrapper[5010]: I1126 16:53:56.659223 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.659205336 podStartE2EDuration="37.659205336s" podCreationTimestamp="2025-11-26 16:53:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:53:56.655885414 +0000 UTC m=+5257.446602622" watchObservedRunningTime="2025-11-26 16:53:56.659205336 +0000 UTC m=+5257.449922474" Nov 26 16:54:06 crc kubenswrapper[5010]: I1126 16:54:06.892749 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:54:06 crc kubenswrapper[5010]: E1126 16:54:06.894022 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 16:54:09 crc kubenswrapper[5010]: I1126 16:54:09.628024 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 26 16:54:09 crc kubenswrapper[5010]: I1126 16:54:09.653025 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 26 16:54:14 crc kubenswrapper[5010]: I1126 16:54:14.746069 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 16:54:14 crc kubenswrapper[5010]: I1126 16:54:14.748114 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 16:54:14 crc kubenswrapper[5010]: I1126 16:54:14.752014 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjbs8" Nov 26 16:54:14 crc kubenswrapper[5010]: I1126 16:54:14.758570 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 16:54:14 crc kubenswrapper[5010]: I1126 16:54:14.864569 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66wt7\" (UniqueName: \"kubernetes.io/projected/34ef03b9-18c9-42ea-9442-a35a5106f353-kube-api-access-66wt7\") pod \"mariadb-client-1-default\" (UID: \"34ef03b9-18c9-42ea-9442-a35a5106f353\") " pod="openstack/mariadb-client-1-default" Nov 26 16:54:14 crc kubenswrapper[5010]: I1126 16:54:14.965788 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66wt7\" (UniqueName: \"kubernetes.io/projected/34ef03b9-18c9-42ea-9442-a35a5106f353-kube-api-access-66wt7\") pod \"mariadb-client-1-default\" (UID: \"34ef03b9-18c9-42ea-9442-a35a5106f353\") " pod="openstack/mariadb-client-1-default" Nov 26 16:54:15 crc kubenswrapper[5010]: I1126 16:54:14.999449 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66wt7\" (UniqueName: \"kubernetes.io/projected/34ef03b9-18c9-42ea-9442-a35a5106f353-kube-api-access-66wt7\") pod \"mariadb-client-1-default\" (UID: \"34ef03b9-18c9-42ea-9442-a35a5106f353\") " pod="openstack/mariadb-client-1-default" Nov 26 16:54:15 crc kubenswrapper[5010]: I1126 16:54:15.103889 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 16:54:15 crc kubenswrapper[5010]: I1126 16:54:15.470779 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 16:54:15 crc kubenswrapper[5010]: I1126 16:54:15.813602 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"34ef03b9-18c9-42ea-9442-a35a5106f353","Type":"ContainerStarted","Data":"5b85dfd6fc11a87acfe8bc987f892e27429643191199ae975492a0c4824c2618"} Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.102141 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4g8x8"] Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.105932 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.129477 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4g8x8"] Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.185436 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g5vf\" (UniqueName: \"kubernetes.io/projected/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-kube-api-access-2g5vf\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.185670 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-utilities\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.185791 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-catalog-content\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.287420 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-utilities\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.287569 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-catalog-content\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.287669 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g5vf\" (UniqueName: \"kubernetes.io/projected/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-kube-api-access-2g5vf\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.288096 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-utilities\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.288257 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-catalog-content\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.315701 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2g5vf\" (UniqueName: \"kubernetes.io/projected/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-kube-api-access-2g5vf\") pod \"redhat-operators-4g8x8\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.487807 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:16 crc kubenswrapper[5010]: I1126 16:54:16.947418 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4g8x8"] Nov 26 16:54:16 crc kubenswrapper[5010]: W1126 16:54:16.964469 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode568abc9_37d2_4045_b3ed_ee9c5ced0a55.slice/crio-2c1a6de68b5f2817cb33d09a0931004288c2d627101a7e2a4bc6677993727ac4 WatchSource:0}: Error finding container 2c1a6de68b5f2817cb33d09a0931004288c2d627101a7e2a4bc6677993727ac4: Status 404 returned error can't find the container with id 2c1a6de68b5f2817cb33d09a0931004288c2d627101a7e2a4bc6677993727ac4 Nov 26 16:54:17 crc kubenswrapper[5010]: I1126 16:54:17.835996 5010 generic.go:334] "Generic (PLEG): container finished" podID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerID="6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc" exitCode=0 Nov 26 16:54:17 crc kubenswrapper[5010]: I1126 16:54:17.836098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerDied","Data":"6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc"} Nov 26 16:54:17 crc kubenswrapper[5010]: I1126 16:54:17.836628 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerStarted","Data":"2c1a6de68b5f2817cb33d09a0931004288c2d627101a7e2a4bc6677993727ac4"} Nov 26 16:54:18 crc kubenswrapper[5010]: I1126 16:54:18.892251 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.689695 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tnmkc"] Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.693991 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.705384 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tnmkc"] Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.754850 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-catalog-content\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.754893 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzx8t\" (UniqueName: \"kubernetes.io/projected/94d18f21-57ef-48cf-b466-bce8a161a0d1-kube-api-access-nzx8t\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.754927 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-utilities\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.856072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-catalog-content\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.856120 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzx8t\" (UniqueName: \"kubernetes.io/projected/94d18f21-57ef-48cf-b466-bce8a161a0d1-kube-api-access-nzx8t\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.856152 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-utilities\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.856759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-catalog-content\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.856788 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-utilities\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:19 crc kubenswrapper[5010]: I1126 16:54:19.889896 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-nzx8t\" (UniqueName: \"kubernetes.io/projected/94d18f21-57ef-48cf-b466-bce8a161a0d1-kube-api-access-nzx8t\") pod \"community-operators-tnmkc\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:20 crc kubenswrapper[5010]: I1126 16:54:20.069511 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:23 crc kubenswrapper[5010]: I1126 16:54:23.807658 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tnmkc"] Nov 26 16:54:23 crc kubenswrapper[5010]: I1126 16:54:23.900918 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnmkc" event={"ID":"94d18f21-57ef-48cf-b466-bce8a161a0d1","Type":"ContainerStarted","Data":"8f4102015030611d3f1c1a8348657b4d88911a3d1b9bf89051ebf1f7e5bce162"} Nov 26 16:54:23 crc kubenswrapper[5010]: I1126 16:54:23.901039 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"9bc507e475d0d8669f1bff93162119416272f1c73d6cf135f83056541ab8c1ac"} Nov 26 16:54:23 crc kubenswrapper[5010]: I1126 16:54:23.903197 5010 generic.go:334] "Generic (PLEG): container finished" podID="34ef03b9-18c9-42ea-9442-a35a5106f353" containerID="fa3efe460f3fbab3677c5bc439e8d9f95110b1093eb34cdd00ac581c55fd5ef5" exitCode=0 Nov 26 16:54:23 crc kubenswrapper[5010]: I1126 16:54:23.903248 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1-default" event={"ID":"34ef03b9-18c9-42ea-9442-a35a5106f353","Type":"ContainerDied","Data":"fa3efe460f3fbab3677c5bc439e8d9f95110b1093eb34cdd00ac581c55fd5ef5"} Nov 26 16:54:23 crc kubenswrapper[5010]: I1126 16:54:23.905894 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerStarted","Data":"d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f"} Nov 26 16:54:24 crc kubenswrapper[5010]: I1126 16:54:24.919075 5010 generic.go:334] "Generic (PLEG): container finished" podID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerID="3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2" exitCode=0 Nov 26 16:54:24 crc kubenswrapper[5010]: I1126 16:54:24.919319 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnmkc" event={"ID":"94d18f21-57ef-48cf-b466-bce8a161a0d1","Type":"ContainerDied","Data":"3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2"} Nov 26 16:54:24 crc kubenswrapper[5010]: I1126 16:54:24.924316 5010 generic.go:334] "Generic (PLEG): container finished" podID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerID="d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f" exitCode=0 Nov 26 16:54:24 crc kubenswrapper[5010]: I1126 16:54:24.924541 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerDied","Data":"d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f"} Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.326493 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.359610 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1-default_34ef03b9-18c9-42ea-9442-a35a5106f353/mariadb-client-1-default/0.log" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.386570 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.394454 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1-default"] Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.447913 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66wt7\" (UniqueName: \"kubernetes.io/projected/34ef03b9-18c9-42ea-9442-a35a5106f353-kube-api-access-66wt7\") pod \"34ef03b9-18c9-42ea-9442-a35a5106f353\" (UID: \"34ef03b9-18c9-42ea-9442-a35a5106f353\") " Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.454964 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34ef03b9-18c9-42ea-9442-a35a5106f353-kube-api-access-66wt7" (OuterVolumeSpecName: "kube-api-access-66wt7") pod "34ef03b9-18c9-42ea-9442-a35a5106f353" (UID: "34ef03b9-18c9-42ea-9442-a35a5106f353"). InnerVolumeSpecName "kube-api-access-66wt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.550209 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66wt7\" (UniqueName: \"kubernetes.io/projected/34ef03b9-18c9-42ea-9442-a35a5106f353-kube-api-access-66wt7\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.940459 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34ef03b9-18c9-42ea-9442-a35a5106f353" path="/var/lib/kubelet/pods/34ef03b9-18c9-42ea-9442-a35a5106f353/volumes" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.943599 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1-default" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.943651 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.943942 5010 scope.go:117] "RemoveContainer" containerID="fa3efe460f3fbab3677c5bc439e8d9f95110b1093eb34cdd00ac581c55fd5ef5" Nov 26 16:54:25 crc kubenswrapper[5010]: E1126 16:54:25.945994 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34ef03b9-18c9-42ea-9442-a35a5106f353" containerName="mariadb-client-1-default" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.946012 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="34ef03b9-18c9-42ea-9442-a35a5106f353" containerName="mariadb-client-1-default" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.946427 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="34ef03b9-18c9-42ea-9442-a35a5106f353" containerName="mariadb-client-1-default" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.947461 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.947551 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.949867 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjbs8" Nov 26 16:54:25 crc kubenswrapper[5010]: I1126 16:54:25.958260 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2x69x\" (UniqueName: \"kubernetes.io/projected/dc047edc-8337-43a7-a7b9-85ae039f9add-kube-api-access-2x69x\") pod \"mariadb-client-2-default\" (UID: \"dc047edc-8337-43a7-a7b9-85ae039f9add\") " pod="openstack/mariadb-client-2-default" Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.059647 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2x69x\" (UniqueName: \"kubernetes.io/projected/dc047edc-8337-43a7-a7b9-85ae039f9add-kube-api-access-2x69x\") pod \"mariadb-client-2-default\" (UID: \"dc047edc-8337-43a7-a7b9-85ae039f9add\") " pod="openstack/mariadb-client-2-default" Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.078112 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2x69x\" (UniqueName: \"kubernetes.io/projected/dc047edc-8337-43a7-a7b9-85ae039f9add-kube-api-access-2x69x\") pod \"mariadb-client-2-default\" (UID: \"dc047edc-8337-43a7-a7b9-85ae039f9add\") " pod="openstack/mariadb-client-2-default" Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.325447 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 16:54:26 crc kubenswrapper[5010]: W1126 16:54:26.732773 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc047edc_8337_43a7_a7b9_85ae039f9add.slice/crio-5e8ceb6ba2e3d6054dfcbf5ee6b20f18219a9f1c7f7200adc02aa7c3e69b4e37 WatchSource:0}: Error finding container 5e8ceb6ba2e3d6054dfcbf5ee6b20f18219a9f1c7f7200adc02aa7c3e69b4e37: Status 404 returned error can't find the container with id 5e8ceb6ba2e3d6054dfcbf5ee6b20f18219a9f1c7f7200adc02aa7c3e69b4e37 Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.733362 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.951677 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"dc047edc-8337-43a7-a7b9-85ae039f9add","Type":"ContainerStarted","Data":"5d6d5ef4dcb7b9e25b58e11ff9781534e16646be887d60756e94531e810a383d"} Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.952146 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"dc047edc-8337-43a7-a7b9-85ae039f9add","Type":"ContainerStarted","Data":"5e8ceb6ba2e3d6054dfcbf5ee6b20f18219a9f1c7f7200adc02aa7c3e69b4e37"} Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.957152 5010 generic.go:334] "Generic (PLEG): container finished" podID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerID="f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078" exitCode=0 Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.957260 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnmkc" event={"ID":"94d18f21-57ef-48cf-b466-bce8a161a0d1","Type":"ContainerDied","Data":"f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078"} Nov 26 16:54:26 crc kubenswrapper[5010]: 
I1126 16:54:26.970602 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerStarted","Data":"af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d"} Nov 26 16:54:26 crc kubenswrapper[5010]: I1126 16:54:26.973308 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-2-default" podStartSLOduration=1.973295302 podStartE2EDuration="1.973295302s" podCreationTimestamp="2025-11-26 16:54:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:54:26.966222906 +0000 UTC m=+5287.756940054" watchObservedRunningTime="2025-11-26 16:54:26.973295302 +0000 UTC m=+5287.764012450" Nov 26 16:54:27 crc kubenswrapper[5010]: I1126 16:54:27.020165 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4g8x8" podStartSLOduration=3.194801825 podStartE2EDuration="11.020143538s" podCreationTimestamp="2025-11-26 16:54:16 +0000 UTC" firstStartedPulling="2025-11-26 16:54:17.838234837 +0000 UTC m=+5278.628951985" lastFinishedPulling="2025-11-26 16:54:25.6635765 +0000 UTC m=+5286.454293698" observedRunningTime="2025-11-26 16:54:27.01379339 +0000 UTC m=+5287.804510548" watchObservedRunningTime="2025-11-26 16:54:27.020143538 +0000 UTC m=+5287.810860686" Nov 26 16:54:27 crc kubenswrapper[5010]: I1126 16:54:27.980355 5010 generic.go:334] "Generic (PLEG): container finished" podID="dc047edc-8337-43a7-a7b9-85ae039f9add" containerID="5d6d5ef4dcb7b9e25b58e11ff9781534e16646be887d60756e94531e810a383d" exitCode=1 Nov 26 16:54:27 crc kubenswrapper[5010]: I1126 16:54:27.980460 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2-default" event={"ID":"dc047edc-8337-43a7-a7b9-85ae039f9add","Type":"ContainerDied","Data":"5d6d5ef4dcb7b9e25b58e11ff9781534e16646be887d60756e94531e810a383d"} Nov 26 16:54:28 crc kubenswrapper[5010]: I1126 16:54:28.994961 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnmkc" event={"ID":"94d18f21-57ef-48cf-b466-bce8a161a0d1","Type":"ContainerStarted","Data":"a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722"} Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.029454 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tnmkc" podStartSLOduration=7.142509918 podStartE2EDuration="10.029430439s" podCreationTimestamp="2025-11-26 16:54:19 +0000 UTC" firstStartedPulling="2025-11-26 16:54:24.92293784 +0000 UTC m=+5285.713654998" lastFinishedPulling="2025-11-26 16:54:27.809858371 +0000 UTC m=+5288.600575519" observedRunningTime="2025-11-26 16:54:29.021661686 +0000 UTC m=+5289.812378874" watchObservedRunningTime="2025-11-26 16:54:29.029430439 +0000 UTC m=+5289.820147587" Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.499846 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.535504 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.540621 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2-default"] Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.620472 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2x69x\" (UniqueName: \"kubernetes.io/projected/dc047edc-8337-43a7-a7b9-85ae039f9add-kube-api-access-2x69x\") pod \"dc047edc-8337-43a7-a7b9-85ae039f9add\" (UID: \"dc047edc-8337-43a7-a7b9-85ae039f9add\") " Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.627833 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc047edc-8337-43a7-a7b9-85ae039f9add-kube-api-access-2x69x" (OuterVolumeSpecName: "kube-api-access-2x69x") pod "dc047edc-8337-43a7-a7b9-85ae039f9add" (UID: "dc047edc-8337-43a7-a7b9-85ae039f9add"). InnerVolumeSpecName "kube-api-access-2x69x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.722380 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2x69x\" (UniqueName: \"kubernetes.io/projected/dc047edc-8337-43a7-a7b9-85ae039f9add-kube-api-access-2x69x\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:29 crc kubenswrapper[5010]: I1126 16:54:29.902950 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc047edc-8337-43a7-a7b9-85ae039f9add" path="/var/lib/kubelet/pods/dc047edc-8337-43a7-a7b9-85ae039f9add/volumes" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.010190 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2-default" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.010463 5010 scope.go:117] "RemoveContainer" containerID="5d6d5ef4dcb7b9e25b58e11ff9781534e16646be887d60756e94531e810a383d" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.028720 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-1"] Nov 26 16:54:30 crc kubenswrapper[5010]: E1126 16:54:30.029375 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc047edc-8337-43a7-a7b9-85ae039f9add" containerName="mariadb-client-2-default" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.029396 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc047edc-8337-43a7-a7b9-85ae039f9add" containerName="mariadb-client-2-default" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.029603 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc047edc-8337-43a7-a7b9-85ae039f9add" containerName="mariadb-client-2-default" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.030192 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.032975 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjbs8" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.058190 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.070152 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.070199 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.128908 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcmzx\" (UniqueName: \"kubernetes.io/projected/cc6014b4-e852-4f43-8a94-4ddcd34a32d8-kube-api-access-mcmzx\") pod \"mariadb-client-1\" (UID: \"cc6014b4-e852-4f43-8a94-4ddcd34a32d8\") " pod="openstack/mariadb-client-1" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.230699 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcmzx\" (UniqueName: \"kubernetes.io/projected/cc6014b4-e852-4f43-8a94-4ddcd34a32d8-kube-api-access-mcmzx\") pod \"mariadb-client-1\" (UID: \"cc6014b4-e852-4f43-8a94-4ddcd34a32d8\") " pod="openstack/mariadb-client-1" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.247945 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcmzx\" (UniqueName: \"kubernetes.io/projected/cc6014b4-e852-4f43-8a94-4ddcd34a32d8-kube-api-access-mcmzx\") pod \"mariadb-client-1\" (UID: \"cc6014b4-e852-4f43-8a94-4ddcd34a32d8\") " pod="openstack/mariadb-client-1" Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.389317 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 16:54:30 crc kubenswrapper[5010]: W1126 16:54:30.736350 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc6014b4_e852_4f43_8a94_4ddcd34a32d8.slice/crio-7773aea187c6f68502e914a0bb25eb4d0d39be429e9be82b260849a438202b76 WatchSource:0}: Error finding container 7773aea187c6f68502e914a0bb25eb4d0d39be429e9be82b260849a438202b76: Status 404 returned error can't find the container with id 7773aea187c6f68502e914a0bb25eb4d0d39be429e9be82b260849a438202b76 Nov 26 16:54:30 crc kubenswrapper[5010]: I1126 16:54:30.741548 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 16:54:31 crc kubenswrapper[5010]: I1126 16:54:31.026488 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"cc6014b4-e852-4f43-8a94-4ddcd34a32d8","Type":"ContainerStarted","Data":"0c2a102da55c1c95f648861c13afe932017037f7a4b66eba50efefa3ef192137"} Nov 26 16:54:31 crc kubenswrapper[5010]: I1126 16:54:31.026902 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"cc6014b4-e852-4f43-8a94-4ddcd34a32d8","Type":"ContainerStarted","Data":"7773aea187c6f68502e914a0bb25eb4d0d39be429e9be82b260849a438202b76"} Nov 26 16:54:31 crc kubenswrapper[5010]: I1126 16:54:31.119854 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-tnmkc" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="registry-server" probeResult="failure" output=< Nov 26 16:54:31 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 16:54:31 crc kubenswrapper[5010]: > Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.035405 5010 generic.go:334] "Generic (PLEG): container finished" podID="cc6014b4-e852-4f43-8a94-4ddcd34a32d8" containerID="0c2a102da55c1c95f648861c13afe932017037f7a4b66eba50efefa3ef192137" exitCode=0 Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.035448 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-1" event={"ID":"cc6014b4-e852-4f43-8a94-4ddcd34a32d8","Type":"ContainerDied","Data":"0c2a102da55c1c95f648861c13afe932017037f7a4b66eba50efefa3ef192137"} Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.463004 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.468845 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcmzx\" (UniqueName: \"kubernetes.io/projected/cc6014b4-e852-4f43-8a94-4ddcd34a32d8-kube-api-access-mcmzx\") pod \"cc6014b4-e852-4f43-8a94-4ddcd34a32d8\" (UID: \"cc6014b4-e852-4f43-8a94-4ddcd34a32d8\") " Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.476733 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc6014b4-e852-4f43-8a94-4ddcd34a32d8-kube-api-access-mcmzx" (OuterVolumeSpecName: "kube-api-access-mcmzx") pod "cc6014b4-e852-4f43-8a94-4ddcd34a32d8" (UID: "cc6014b4-e852-4f43-8a94-4ddcd34a32d8"). InnerVolumeSpecName "kube-api-access-mcmzx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.487191 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-1_cc6014b4-e852-4f43-8a94-4ddcd34a32d8/mariadb-client-1/0.log" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.520254 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.529531 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-1"] Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.570927 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcmzx\" (UniqueName: \"kubernetes.io/projected/cc6014b4-e852-4f43-8a94-4ddcd34a32d8-kube-api-access-mcmzx\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.957081 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 16:54:32 crc kubenswrapper[5010]: E1126 16:54:32.957577 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc6014b4-e852-4f43-8a94-4ddcd34a32d8" containerName="mariadb-client-1" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.957606 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc6014b4-e852-4f43-8a94-4ddcd34a32d8" containerName="mariadb-client-1" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.957910 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc6014b4-e852-4f43-8a94-4ddcd34a32d8" containerName="mariadb-client-1" Nov 26 16:54:32 crc kubenswrapper[5010]: I1126 16:54:32.958816 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.011111 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.048124 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7773aea187c6f68502e914a0bb25eb4d0d39be429e9be82b260849a438202b76" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.048207 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-1" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.099875 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8xgz\" (UniqueName: \"kubernetes.io/projected/0aa55d6f-4c96-48c0-b579-a6be695da969-kube-api-access-x8xgz\") pod \"mariadb-client-4-default\" (UID: \"0aa55d6f-4c96-48c0-b579-a6be695da969\") " pod="openstack/mariadb-client-4-default" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.201493 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8xgz\" (UniqueName: \"kubernetes.io/projected/0aa55d6f-4c96-48c0-b579-a6be695da969-kube-api-access-x8xgz\") pod \"mariadb-client-4-default\" (UID: \"0aa55d6f-4c96-48c0-b579-a6be695da969\") " pod="openstack/mariadb-client-4-default" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.235121 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8xgz\" (UniqueName: \"kubernetes.io/projected/0aa55d6f-4c96-48c0-b579-a6be695da969-kube-api-access-x8xgz\") pod \"mariadb-client-4-default\" (UID: \"0aa55d6f-4c96-48c0-b579-a6be695da969\") " pod="openstack/mariadb-client-4-default" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.329467 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.903463 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cc6014b4-e852-4f43-8a94-4ddcd34a32d8" path="/var/lib/kubelet/pods/cc6014b4-e852-4f43-8a94-4ddcd34a32d8/volumes" Nov 26 16:54:33 crc kubenswrapper[5010]: I1126 16:54:33.931274 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 16:54:34 crc kubenswrapper[5010]: I1126 16:54:34.069330 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"0aa55d6f-4c96-48c0-b579-a6be695da969","Type":"ContainerStarted","Data":"2f0971faca83e94e07c15bfc871b10c1177796209992290f1404fd9dd1da67c4"} Nov 26 16:54:35 crc kubenswrapper[5010]: I1126 16:54:35.082693 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-4-default" event={"ID":"0aa55d6f-4c96-48c0-b579-a6be695da969","Type":"ContainerDied","Data":"a864714f865246ab5113f5b1e3e5a26ee62fd9816deddcfcc5dd4bb4a579279b"} Nov 26 16:54:35 crc kubenswrapper[5010]: I1126 16:54:35.082540 5010 generic.go:334] "Generic (PLEG): container finished" podID="0aa55d6f-4c96-48c0-b579-a6be695da969" containerID="a864714f865246ab5113f5b1e3e5a26ee62fd9816deddcfcc5dd4bb4a579279b" exitCode=0 Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.487991 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.488042 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.488415 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.509436 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-4-default_0aa55d6f-4c96-48c0-b579-a6be695da969/mariadb-client-4-default/0.log" Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.539917 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.549952 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-4-default"] Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.565135 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.662812 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8xgz\" (UniqueName: \"kubernetes.io/projected/0aa55d6f-4c96-48c0-b579-a6be695da969-kube-api-access-x8xgz\") pod \"0aa55d6f-4c96-48c0-b579-a6be695da969\" (UID: \"0aa55d6f-4c96-48c0-b579-a6be695da969\") " Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.668861 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0aa55d6f-4c96-48c0-b579-a6be695da969-kube-api-access-x8xgz" (OuterVolumeSpecName: "kube-api-access-x8xgz") pod "0aa55d6f-4c96-48c0-b579-a6be695da969" (UID: "0aa55d6f-4c96-48c0-b579-a6be695da969"). InnerVolumeSpecName "kube-api-access-x8xgz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:36 crc kubenswrapper[5010]: I1126 16:54:36.764774 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8xgz\" (UniqueName: \"kubernetes.io/projected/0aa55d6f-4c96-48c0-b579-a6be695da969-kube-api-access-x8xgz\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:37 crc kubenswrapper[5010]: I1126 16:54:37.103475 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2f0971faca83e94e07c15bfc871b10c1177796209992290f1404fd9dd1da67c4" Nov 26 16:54:37 crc kubenswrapper[5010]: I1126 16:54:37.103858 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-4-default" Nov 26 16:54:37 crc kubenswrapper[5010]: I1126 16:54:37.166478 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:37 crc kubenswrapper[5010]: I1126 16:54:37.226181 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4g8x8"] Nov 26 16:54:37 crc kubenswrapper[5010]: I1126 16:54:37.908934 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0aa55d6f-4c96-48c0-b579-a6be695da969" path="/var/lib/kubelet/pods/0aa55d6f-4c96-48c0-b579-a6be695da969/volumes" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.124473 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4g8x8" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="registry-server" containerID="cri-o://af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d" gracePeriod=2 Nov 26 16:54:39 crc kubenswrapper[5010]: E1126 16:54:39.394030 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode568abc9_37d2_4045_b3ed_ee9c5ced0a55.slice/crio-conmon-af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode568abc9_37d2_4045_b3ed_ee9c5ced0a55.slice/crio-af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.574759 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.719353 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-catalog-content\") pod \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.719832 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2g5vf\" (UniqueName: \"kubernetes.io/projected/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-kube-api-access-2g5vf\") pod \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.719917 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-utilities\") pod \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\" (UID: \"e568abc9-37d2-4045-b3ed-ee9c5ced0a55\") " Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.721765 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-utilities" (OuterVolumeSpecName: "utilities") pod "e568abc9-37d2-4045-b3ed-ee9c5ced0a55" (UID: "e568abc9-37d2-4045-b3ed-ee9c5ced0a55"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.728732 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-kube-api-access-2g5vf" (OuterVolumeSpecName: "kube-api-access-2g5vf") pod "e568abc9-37d2-4045-b3ed-ee9c5ced0a55" (UID: "e568abc9-37d2-4045-b3ed-ee9c5ced0a55"). InnerVolumeSpecName "kube-api-access-2g5vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.816647 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e568abc9-37d2-4045-b3ed-ee9c5ced0a55" (UID: "e568abc9-37d2-4045-b3ed-ee9c5ced0a55"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.822234 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.822259 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2g5vf\" (UniqueName: \"kubernetes.io/projected/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-kube-api-access-2g5vf\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:39 crc kubenswrapper[5010]: I1126 16:54:39.822269 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e568abc9-37d2-4045-b3ed-ee9c5ced0a55-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.149677 5010 generic.go:334] "Generic (PLEG): container finished" podID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerID="af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d" exitCode=0 Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.149852 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4g8x8" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.150832 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerDied","Data":"af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d"} Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.150910 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4g8x8" event={"ID":"e568abc9-37d2-4045-b3ed-ee9c5ced0a55","Type":"ContainerDied","Data":"2c1a6de68b5f2817cb33d09a0931004288c2d627101a7e2a4bc6677993727ac4"} Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.150949 5010 scope.go:117] "RemoveContainer" containerID="af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.156180 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.189978 5010 scope.go:117] "RemoveContainer" containerID="d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.231069 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4g8x8"] Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.238072 5010 scope.go:117] "RemoveContainer" containerID="6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.240440 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4g8x8"] Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.241037 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.275004 5010 scope.go:117] "RemoveContainer" containerID="af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d" Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.275582 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d\": container with ID starting with af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d not found: ID does not exist" containerID="af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.275642 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d"} err="failed to get container status \"af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d\": rpc error: code = NotFound desc = could not find container \"af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d\": container with ID starting with af180a045ace09bc407ca71fdfbbfff28d00986b2c498005e9875404b25e836d not found: ID does not exist" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.275677 5010 scope.go:117] "RemoveContainer" containerID="d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f" Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.276635 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f\": container with ID starting with d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f not found: ID does not exist" containerID="d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.276672 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f"} err="failed to get container status \"d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f\": rpc error: code = NotFound desc = could not find container \"d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f\": container with ID starting with d921de28b403b389993af1411d738580e871342f01726f6749f097fa37c2eb5f not found: ID does not exist" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.276755 5010 scope.go:117] "RemoveContainer" containerID="6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc" Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.277102 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc\": container with ID starting with 6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc not found: ID does not exist" containerID="6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.277128 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc"} err="failed to get container status \"6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc\": rpc error: code = NotFound desc = could not find container \"6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc\": container with ID starting with 6f13a23d7ab06ac2106cb3cbc5b8c4a9d4d552c5b5e85fe5c9d57a601c0accfc not found: ID does not exist" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.806569 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tnmkc"] Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.867471 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.867831 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="extract-utilities" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.867852 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="extract-utilities" Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.867884 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0aa55d6f-4c96-48c0-b579-a6be695da969" containerName="mariadb-client-4-default" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.867893 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0aa55d6f-4c96-48c0-b579-a6be695da969" containerName="mariadb-client-4-default" Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.867921 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="extract-content" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.867930 5010 
state_mem.go:107] "Deleted CPUSet assignment" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="extract-content" Nov 26 16:54:40 crc kubenswrapper[5010]: E1126 16:54:40.867944 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="registry-server" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.867952 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="registry-server" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.868157 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0aa55d6f-4c96-48c0-b579-a6be695da969" containerName="mariadb-client-4-default" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.868179 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" containerName="registry-server" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.868766 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.879382 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjbs8" Nov 26 16:54:40 crc kubenswrapper[5010]: I1126 16:54:40.891394 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 16:54:41 crc kubenswrapper[5010]: I1126 16:54:41.048206 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rg7fx\" (UniqueName: \"kubernetes.io/projected/00dc9214-d4f4-4b47-9dc1-72ab996e4677-kube-api-access-rg7fx\") pod \"mariadb-client-5-default\" (UID: \"00dc9214-d4f4-4b47-9dc1-72ab996e4677\") " pod="openstack/mariadb-client-5-default" Nov 26 16:54:41 crc kubenswrapper[5010]: I1126 16:54:41.149867 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rg7fx\" (UniqueName: \"kubernetes.io/projected/00dc9214-d4f4-4b47-9dc1-72ab996e4677-kube-api-access-rg7fx\") pod \"mariadb-client-5-default\" (UID: \"00dc9214-d4f4-4b47-9dc1-72ab996e4677\") " pod="openstack/mariadb-client-5-default" Nov 26 16:54:41 crc kubenswrapper[5010]: I1126 16:54:41.194780 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rg7fx\" (UniqueName: \"kubernetes.io/projected/00dc9214-d4f4-4b47-9dc1-72ab996e4677-kube-api-access-rg7fx\") pod \"mariadb-client-5-default\" (UID: \"00dc9214-d4f4-4b47-9dc1-72ab996e4677\") " pod="openstack/mariadb-client-5-default" Nov 26 16:54:41 crc kubenswrapper[5010]: I1126 16:54:41.200619 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 16:54:41 crc kubenswrapper[5010]: I1126 16:54:41.481640 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 16:54:41 crc kubenswrapper[5010]: I1126 16:54:41.911973 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e568abc9-37d2-4045-b3ed-ee9c5ced0a55" path="/var/lib/kubelet/pods/e568abc9-37d2-4045-b3ed-ee9c5ced0a55/volumes" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.177981 5010 generic.go:334] "Generic (PLEG): container finished" podID="00dc9214-d4f4-4b47-9dc1-72ab996e4677" containerID="3fa84f011bcdefc65dfed689c2c9e9f21453a840e4ec1715e340f3cf8cad7f3f" exitCode=0 Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.178055 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"00dc9214-d4f4-4b47-9dc1-72ab996e4677","Type":"ContainerDied","Data":"3fa84f011bcdefc65dfed689c2c9e9f21453a840e4ec1715e340f3cf8cad7f3f"} Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.178130 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-5-default" event={"ID":"00dc9214-d4f4-4b47-9dc1-72ab996e4677","Type":"ContainerStarted","Data":"f589d05323a054ae25ef48f961d9c7731015654202fdf90a2cbadebbb11ed955"} Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.178433 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tnmkc" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="registry-server" containerID="cri-o://a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722" gracePeriod=2 Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.694314 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.776950 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-catalog-content\") pod \"94d18f21-57ef-48cf-b466-bce8a161a0d1\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.777116 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzx8t\" (UniqueName: \"kubernetes.io/projected/94d18f21-57ef-48cf-b466-bce8a161a0d1-kube-api-access-nzx8t\") pod \"94d18f21-57ef-48cf-b466-bce8a161a0d1\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.782617 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94d18f21-57ef-48cf-b466-bce8a161a0d1-kube-api-access-nzx8t" (OuterVolumeSpecName: "kube-api-access-nzx8t") pod "94d18f21-57ef-48cf-b466-bce8a161a0d1" (UID: "94d18f21-57ef-48cf-b466-bce8a161a0d1"). InnerVolumeSpecName "kube-api-access-nzx8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.831166 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "94d18f21-57ef-48cf-b466-bce8a161a0d1" (UID: "94d18f21-57ef-48cf-b466-bce8a161a0d1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.879004 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-utilities\") pod \"94d18f21-57ef-48cf-b466-bce8a161a0d1\" (UID: \"94d18f21-57ef-48cf-b466-bce8a161a0d1\") " Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.879544 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.879586 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzx8t\" (UniqueName: \"kubernetes.io/projected/94d18f21-57ef-48cf-b466-bce8a161a0d1-kube-api-access-nzx8t\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.881101 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-utilities" (OuterVolumeSpecName: "utilities") pod "94d18f21-57ef-48cf-b466-bce8a161a0d1" (UID: "94d18f21-57ef-48cf-b466-bce8a161a0d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:54:42 crc kubenswrapper[5010]: I1126 16:54:42.982308 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/94d18f21-57ef-48cf-b466-bce8a161a0d1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.194231 5010 generic.go:334] "Generic (PLEG): container finished" podID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerID="a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722" exitCode=0 Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.194296 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnmkc" event={"ID":"94d18f21-57ef-48cf-b466-bce8a161a0d1","Type":"ContainerDied","Data":"a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722"} Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.194350 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tnmkc" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.194373 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tnmkc" event={"ID":"94d18f21-57ef-48cf-b466-bce8a161a0d1","Type":"ContainerDied","Data":"8f4102015030611d3f1c1a8348657b4d88911a3d1b9bf89051ebf1f7e5bce162"} Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.194404 5010 scope.go:117] "RemoveContainer" containerID="a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.230658 5010 scope.go:117] "RemoveContainer" containerID="f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.260290 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tnmkc"] Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.269822 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tnmkc"] Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.296569 5010 scope.go:117] "RemoveContainer" containerID="3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.342251 5010 scope.go:117] "RemoveContainer" containerID="a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722" Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.343256 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722\": container with ID starting with a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722 not found: ID does not exist" containerID="a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.343291 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722"} err="failed to get container status \"a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722\": rpc error: code = NotFound desc = could not find container \"a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722\": container with ID starting with a7f73c429619ebc80e5db1646777fe7e6b66cd7e9e5bf1b6bf6290cb6547c722 not found: ID does not exist" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.343318 5010 scope.go:117] "RemoveContainer" containerID="f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078" Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.343796 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078\": container with ID starting with f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078 not found: ID does not exist" containerID="f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.343849 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078"} err="failed to get container status \"f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078\": rpc error: code = NotFound desc = could not find 
container \"f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078\": container with ID starting with f82f5fc63e3ba657290161dabaef85ea020334a32ea2fe140ae77e79481cc078 not found: ID does not exist" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.343898 5010 scope.go:117] "RemoveContainer" containerID="3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2" Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.344236 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2\": container with ID starting with 3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2 not found: ID does not exist" containerID="3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.344286 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2"} err="failed to get container status \"3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2\": rpc error: code = NotFound desc = could not find container \"3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2\": container with ID starting with 3332be7e4027f29edebddc984904edb74bad1724a34130ff201d6d32a22a8af2 not found: ID does not exist" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.648086 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.672692 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-5-default_00dc9214-d4f4-4b47-9dc1-72ab996e4677/mariadb-client-5-default/0.log" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.692142 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rg7fx\" (UniqueName: \"kubernetes.io/projected/00dc9214-d4f4-4b47-9dc1-72ab996e4677-kube-api-access-rg7fx\") pod \"00dc9214-d4f4-4b47-9dc1-72ab996e4677\" (UID: \"00dc9214-d4f4-4b47-9dc1-72ab996e4677\") " Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.701791 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00dc9214-d4f4-4b47-9dc1-72ab996e4677-kube-api-access-rg7fx" (OuterVolumeSpecName: "kube-api-access-rg7fx") pod "00dc9214-d4f4-4b47-9dc1-72ab996e4677" (UID: "00dc9214-d4f4-4b47-9dc1-72ab996e4677"). InnerVolumeSpecName "kube-api-access-rg7fx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.713850 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.721686 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-5-default"] Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.793684 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rg7fx\" (UniqueName: \"kubernetes.io/projected/00dc9214-d4f4-4b47-9dc1-72ab996e4677-kube-api-access-rg7fx\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.903284 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00dc9214-d4f4-4b47-9dc1-72ab996e4677" path="/var/lib/kubelet/pods/00dc9214-d4f4-4b47-9dc1-72ab996e4677/volumes" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.903805 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" path="/var/lib/kubelet/pods/94d18f21-57ef-48cf-b466-bce8a161a0d1/volumes" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.904602 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.904897 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="registry-server" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.904915 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="registry-server" Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.904930 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="extract-utilities" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.904936 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="extract-utilities" Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.904951 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="extract-content" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.904957 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="extract-content" Nov 26 16:54:43 crc kubenswrapper[5010]: E1126 16:54:43.904978 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00dc9214-d4f4-4b47-9dc1-72ab996e4677" containerName="mariadb-client-5-default" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.904985 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="00dc9214-d4f4-4b47-9dc1-72ab996e4677" containerName="mariadb-client-5-default" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.905142 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="00dc9214-d4f4-4b47-9dc1-72ab996e4677" containerName="mariadb-client-5-default" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.905155 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="94d18f21-57ef-48cf-b466-bce8a161a0d1" containerName="registry-server" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.905687 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.917574 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 16:54:43 crc kubenswrapper[5010]: I1126 16:54:43.996529 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nj7lg\" (UniqueName: \"kubernetes.io/projected/e3834c64-88fe-4afe-ac91-6a782d593035-kube-api-access-nj7lg\") pod \"mariadb-client-6-default\" (UID: \"e3834c64-88fe-4afe-ac91-6a782d593035\") " pod="openstack/mariadb-client-6-default" Nov 26 16:54:44 crc kubenswrapper[5010]: I1126 16:54:44.098291 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nj7lg\" (UniqueName: \"kubernetes.io/projected/e3834c64-88fe-4afe-ac91-6a782d593035-kube-api-access-nj7lg\") pod \"mariadb-client-6-default\" (UID: \"e3834c64-88fe-4afe-ac91-6a782d593035\") " pod="openstack/mariadb-client-6-default" Nov 26 16:54:44 crc kubenswrapper[5010]: I1126 16:54:44.122818 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nj7lg\" (UniqueName: \"kubernetes.io/projected/e3834c64-88fe-4afe-ac91-6a782d593035-kube-api-access-nj7lg\") pod \"mariadb-client-6-default\" (UID: \"e3834c64-88fe-4afe-ac91-6a782d593035\") " pod="openstack/mariadb-client-6-default" Nov 26 16:54:44 crc kubenswrapper[5010]: I1126 16:54:44.203994 5010 scope.go:117] "RemoveContainer" containerID="3fa84f011bcdefc65dfed689c2c9e9f21453a840e4ec1715e340f3cf8cad7f3f" Nov 26 16:54:44 crc kubenswrapper[5010]: I1126 16:54:44.204024 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-5-default" Nov 26 16:54:44 crc kubenswrapper[5010]: I1126 16:54:44.233488 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 16:54:44 crc kubenswrapper[5010]: I1126 16:54:44.797846 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 16:54:44 crc kubenswrapper[5010]: W1126 16:54:44.805261 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3834c64_88fe_4afe_ac91_6a782d593035.slice/crio-f97e05425ce429e448f2f38176b418b702ca3150f087b1044ad11de772d61fa7 WatchSource:0}: Error finding container f97e05425ce429e448f2f38176b418b702ca3150f087b1044ad11de772d61fa7: Status 404 returned error can't find the container with id f97e05425ce429e448f2f38176b418b702ca3150f087b1044ad11de772d61fa7 Nov 26 16:54:45 crc kubenswrapper[5010]: I1126 16:54:45.221197 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"e3834c64-88fe-4afe-ac91-6a782d593035","Type":"ContainerStarted","Data":"6156357786e17ce4d39659b5524ba3da22ccb04dd47112e0c02b23c44eb44953"} Nov 26 16:54:45 crc kubenswrapper[5010]: I1126 16:54:45.222511 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"e3834c64-88fe-4afe-ac91-6a782d593035","Type":"ContainerStarted","Data":"f97e05425ce429e448f2f38176b418b702ca3150f087b1044ad11de772d61fa7"} Nov 26 16:54:45 crc kubenswrapper[5010]: I1126 16:54:45.241643 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-client-6-default" podStartSLOduration=2.24161463 podStartE2EDuration="2.24161463s" podCreationTimestamp="2025-11-26 16:54:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:54:45.235118109 +0000 UTC m=+5306.025835317" watchObservedRunningTime="2025-11-26 16:54:45.24161463 +0000 UTC m=+5306.032331778" Nov 26 16:54:46 crc kubenswrapper[5010]: I1126 16:54:46.234346 5010 generic.go:334] "Generic (PLEG): container finished" podID="e3834c64-88fe-4afe-ac91-6a782d593035" containerID="6156357786e17ce4d39659b5524ba3da22ccb04dd47112e0c02b23c44eb44953" exitCode=1 Nov 26 16:54:46 crc kubenswrapper[5010]: I1126 16:54:46.234424 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-6-default" event={"ID":"e3834c64-88fe-4afe-ac91-6a782d593035","Type":"ContainerDied","Data":"6156357786e17ce4d39659b5524ba3da22ccb04dd47112e0c02b23c44eb44953"} Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.641260 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.693842 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.701612 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-6-default"] Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.753915 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nj7lg\" (UniqueName: \"kubernetes.io/projected/e3834c64-88fe-4afe-ac91-6a782d593035-kube-api-access-nj7lg\") pod \"e3834c64-88fe-4afe-ac91-6a782d593035\" (UID: \"e3834c64-88fe-4afe-ac91-6a782d593035\") " Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.759758 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3834c64-88fe-4afe-ac91-6a782d593035-kube-api-access-nj7lg" (OuterVolumeSpecName: "kube-api-access-nj7lg") pod "e3834c64-88fe-4afe-ac91-6a782d593035" (UID: "e3834c64-88fe-4afe-ac91-6a782d593035"). InnerVolumeSpecName "kube-api-access-nj7lg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.821347 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 16:54:47 crc kubenswrapper[5010]: E1126 16:54:47.821885 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3834c64-88fe-4afe-ac91-6a782d593035" containerName="mariadb-client-6-default" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.821918 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3834c64-88fe-4afe-ac91-6a782d593035" containerName="mariadb-client-6-default" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.822174 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3834c64-88fe-4afe-ac91-6a782d593035" containerName="mariadb-client-6-default" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.823090 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.832500 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.855313 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nj7lg\" (UniqueName: \"kubernetes.io/projected/e3834c64-88fe-4afe-ac91-6a782d593035-kube-api-access-nj7lg\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.900243 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3834c64-88fe-4afe-ac91-6a782d593035" path="/var/lib/kubelet/pods/e3834c64-88fe-4afe-ac91-6a782d593035/volumes" Nov 26 16:54:47 crc kubenswrapper[5010]: I1126 16:54:47.957081 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tmsd\" (UniqueName: \"kubernetes.io/projected/8fb561fe-5413-4aee-a180-1b3232dba216-kube-api-access-8tmsd\") pod \"mariadb-client-7-default\" (UID: \"8fb561fe-5413-4aee-a180-1b3232dba216\") " pod="openstack/mariadb-client-7-default" Nov 26 16:54:48 crc kubenswrapper[5010]: I1126 16:54:48.059378 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tmsd\" (UniqueName: \"kubernetes.io/projected/8fb561fe-5413-4aee-a180-1b3232dba216-kube-api-access-8tmsd\") pod \"mariadb-client-7-default\" (UID: \"8fb561fe-5413-4aee-a180-1b3232dba216\") " pod="openstack/mariadb-client-7-default" Nov 26 16:54:48 crc kubenswrapper[5010]: I1126 16:54:48.084488 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tmsd\" (UniqueName: \"kubernetes.io/projected/8fb561fe-5413-4aee-a180-1b3232dba216-kube-api-access-8tmsd\") pod \"mariadb-client-7-default\" (UID: \"8fb561fe-5413-4aee-a180-1b3232dba216\") " pod="openstack/mariadb-client-7-default" Nov 26 16:54:48 crc kubenswrapper[5010]: I1126 16:54:48.146041 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 16:54:48 crc kubenswrapper[5010]: I1126 16:54:48.272528 5010 scope.go:117] "RemoveContainer" containerID="6156357786e17ce4d39659b5524ba3da22ccb04dd47112e0c02b23c44eb44953" Nov 26 16:54:48 crc kubenswrapper[5010]: I1126 16:54:48.272644 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-6-default" Nov 26 16:54:48 crc kubenswrapper[5010]: I1126 16:54:48.735486 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 16:54:49 crc kubenswrapper[5010]: I1126 16:54:49.289416 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"8fb561fe-5413-4aee-a180-1b3232dba216","Type":"ContainerDied","Data":"33951497e782d288e6726a745cda9e4d9e0b01f1d218bf1851f2bc1e686d026c"} Nov 26 16:54:49 crc kubenswrapper[5010]: I1126 16:54:49.289386 5010 generic.go:334] "Generic (PLEG): container finished" podID="8fb561fe-5413-4aee-a180-1b3232dba216" containerID="33951497e782d288e6726a745cda9e4d9e0b01f1d218bf1851f2bc1e686d026c" exitCode=0 Nov 26 16:54:49 crc kubenswrapper[5010]: I1126 16:54:49.289632 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-7-default" event={"ID":"8fb561fe-5413-4aee-a180-1b3232dba216","Type":"ContainerStarted","Data":"b527dff40ae26f7ea38e086849f858a428a1c0d042f3c3b5a34c575a61240c5f"} Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.677373 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.697146 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-7-default_8fb561fe-5413-4aee-a180-1b3232dba216/mariadb-client-7-default/0.log" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.707907 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tmsd\" (UniqueName: \"kubernetes.io/projected/8fb561fe-5413-4aee-a180-1b3232dba216-kube-api-access-8tmsd\") pod \"8fb561fe-5413-4aee-a180-1b3232dba216\" (UID: \"8fb561fe-5413-4aee-a180-1b3232dba216\") " Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.713324 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fb561fe-5413-4aee-a180-1b3232dba216-kube-api-access-8tmsd" (OuterVolumeSpecName: "kube-api-access-8tmsd") pod "8fb561fe-5413-4aee-a180-1b3232dba216" (UID: "8fb561fe-5413-4aee-a180-1b3232dba216"). InnerVolumeSpecName "kube-api-access-8tmsd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.727623 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.738476 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-7-default"] Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.809698 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tmsd\" (UniqueName: \"kubernetes.io/projected/8fb561fe-5413-4aee-a180-1b3232dba216-kube-api-access-8tmsd\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.914642 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client-2"] Nov 26 16:54:50 crc kubenswrapper[5010]: E1126 16:54:50.915548 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fb561fe-5413-4aee-a180-1b3232dba216" containerName="mariadb-client-7-default" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.915587 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fb561fe-5413-4aee-a180-1b3232dba216" containerName="mariadb-client-7-default" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.916207 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fb561fe-5413-4aee-a180-1b3232dba216" containerName="mariadb-client-7-default" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.917556 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 16:54:50 crc kubenswrapper[5010]: I1126 16:54:50.935429 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.013856 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2jl6\" (UniqueName: \"kubernetes.io/projected/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd-kube-api-access-c2jl6\") pod \"mariadb-client-2\" (UID: \"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd\") " pod="openstack/mariadb-client-2" Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.115687 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2jl6\" (UniqueName: \"kubernetes.io/projected/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd-kube-api-access-c2jl6\") pod \"mariadb-client-2\" (UID: \"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd\") " pod="openstack/mariadb-client-2" Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.137098 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2jl6\" (UniqueName: \"kubernetes.io/projected/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd-kube-api-access-c2jl6\") pod \"mariadb-client-2\" (UID: \"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd\") " pod="openstack/mariadb-client-2" Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.266370 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.328353 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b527dff40ae26f7ea38e086849f858a428a1c0d042f3c3b5a34c575a61240c5f" Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.328415 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-7-default" Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.637046 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 16:54:51 crc kubenswrapper[5010]: I1126 16:54:51.903893 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fb561fe-5413-4aee-a180-1b3232dba216" path="/var/lib/kubelet/pods/8fb561fe-5413-4aee-a180-1b3232dba216/volumes" Nov 26 16:54:52 crc kubenswrapper[5010]: I1126 16:54:52.344775 5010 generic.go:334] "Generic (PLEG): container finished" podID="c0c690dc-2e00-4e95-9100-1ccd41ecf1cd" containerID="7a0a67ffd90f2c2fc7f0a041e47adfb1d288e8510fa33e8b7b4a0b327dfc7e6e" exitCode=0 Nov 26 16:54:52 crc kubenswrapper[5010]: I1126 16:54:52.344835 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd","Type":"ContainerDied","Data":"7a0a67ffd90f2c2fc7f0a041e47adfb1d288e8510fa33e8b7b4a0b327dfc7e6e"} Nov 26 16:54:52 crc kubenswrapper[5010]: I1126 16:54:52.344871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client-2" event={"ID":"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd","Type":"ContainerStarted","Data":"3e665914ab71e8d2a9aeb7592502400582f57155940055170da8c6cecf758738"} Nov 26 16:54:53 crc kubenswrapper[5010]: I1126 16:54:53.836691 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 16:54:53 crc kubenswrapper[5010]: I1126 16:54:53.859991 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client-2_c0c690dc-2e00-4e95-9100-1ccd41ecf1cd/mariadb-client-2/0.log" Nov 26 16:54:53 crc kubenswrapper[5010]: I1126 16:54:53.949373 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 16:54:53 crc kubenswrapper[5010]: I1126 16:54:53.959213 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client-2"] Nov 26 16:54:53 crc kubenswrapper[5010]: I1126 16:54:53.988997 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2jl6\" (UniqueName: \"kubernetes.io/projected/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd-kube-api-access-c2jl6\") pod \"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd\" (UID: \"c0c690dc-2e00-4e95-9100-1ccd41ecf1cd\") " Nov 26 16:54:53 crc kubenswrapper[5010]: I1126 16:54:53.996390 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd-kube-api-access-c2jl6" (OuterVolumeSpecName: "kube-api-access-c2jl6") pod "c0c690dc-2e00-4e95-9100-1ccd41ecf1cd" (UID: "c0c690dc-2e00-4e95-9100-1ccd41ecf1cd"). InnerVolumeSpecName "kube-api-access-c2jl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:54:54 crc kubenswrapper[5010]: I1126 16:54:54.091117 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2jl6\" (UniqueName: \"kubernetes.io/projected/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd-kube-api-access-c2jl6\") on node \"crc\" DevicePath \"\"" Nov 26 16:54:54 crc kubenswrapper[5010]: I1126 16:54:54.366014 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e665914ab71e8d2a9aeb7592502400582f57155940055170da8c6cecf758738" Nov 26 16:54:54 crc kubenswrapper[5010]: I1126 16:54:54.366105 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client-2" Nov 26 16:54:55 crc kubenswrapper[5010]: I1126 16:54:55.907980 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c690dc-2e00-4e95-9100-1ccd41ecf1cd" path="/var/lib/kubelet/pods/c0c690dc-2e00-4e95-9100-1ccd41ecf1cd/volumes" Nov 26 16:56:05 crc kubenswrapper[5010]: I1126 16:56:05.264386 5010 scope.go:117] "RemoveContainer" containerID="684fd4866228c14fd55457446cfd74dcb37f9fd5c9d6bd860f5bd253b2726a62" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.437747 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-j47wt"] Nov 26 16:56:17 crc kubenswrapper[5010]: E1126 16:56:17.438933 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c690dc-2e00-4e95-9100-1ccd41ecf1cd" containerName="mariadb-client-2" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.438950 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c690dc-2e00-4e95-9100-1ccd41ecf1cd" containerName="mariadb-client-2" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.439176 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c690dc-2e00-4e95-9100-1ccd41ecf1cd" containerName="mariadb-client-2" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.440753 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.470083 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j47wt"] Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.566128 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9s9r\" (UniqueName: \"kubernetes.io/projected/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-kube-api-access-z9s9r\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.566263 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-utilities\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.566297 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-catalog-content\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.667267 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9s9r\" (UniqueName: \"kubernetes.io/projected/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-kube-api-access-z9s9r\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.667356 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-utilities\") pod \"redhat-marketplace-j47wt\" 
(UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.667386 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-catalog-content\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.668558 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-utilities\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.668575 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-catalog-content\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.689372 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9s9r\" (UniqueName: \"kubernetes.io/projected/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-kube-api-access-z9s9r\") pod \"redhat-marketplace-j47wt\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:17 crc kubenswrapper[5010]: I1126 16:56:17.768133 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:18 crc kubenswrapper[5010]: I1126 16:56:18.246042 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-j47wt"] Nov 26 16:56:19 crc kubenswrapper[5010]: I1126 16:56:19.201736 5010 generic.go:334] "Generic (PLEG): container finished" podID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerID="1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3" exitCode=0 Nov 26 16:56:19 crc kubenswrapper[5010]: I1126 16:56:19.201833 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j47wt" event={"ID":"52c04bb5-84ed-44e6-a6db-c5e05a241d0d","Type":"ContainerDied","Data":"1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3"} Nov 26 16:56:19 crc kubenswrapper[5010]: I1126 16:56:19.202255 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j47wt" event={"ID":"52c04bb5-84ed-44e6-a6db-c5e05a241d0d","Type":"ContainerStarted","Data":"dba15986c9aa62fa9de842784cc7f7df75fb870e35245cd08fad59f056bfcea3"} Nov 26 16:56:19 crc kubenswrapper[5010]: I1126 16:56:19.205398 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 16:56:20 crc kubenswrapper[5010]: I1126 16:56:20.211961 5010 generic.go:334] "Generic (PLEG): container finished" podID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerID="74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97" exitCode=0 Nov 26 16:56:20 crc kubenswrapper[5010]: I1126 16:56:20.212239 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j47wt" 
event={"ID":"52c04bb5-84ed-44e6-a6db-c5e05a241d0d","Type":"ContainerDied","Data":"74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97"} Nov 26 16:56:22 crc kubenswrapper[5010]: I1126 16:56:22.231318 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j47wt" event={"ID":"52c04bb5-84ed-44e6-a6db-c5e05a241d0d","Type":"ContainerStarted","Data":"757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa"} Nov 26 16:56:22 crc kubenswrapper[5010]: I1126 16:56:22.256322 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-j47wt" podStartSLOduration=3.290959329 podStartE2EDuration="5.256289706s" podCreationTimestamp="2025-11-26 16:56:17 +0000 UTC" firstStartedPulling="2025-11-26 16:56:19.204589525 +0000 UTC m=+5399.995306713" lastFinishedPulling="2025-11-26 16:56:21.169919942 +0000 UTC m=+5401.960637090" observedRunningTime="2025-11-26 16:56:22.248127023 +0000 UTC m=+5403.038844171" watchObservedRunningTime="2025-11-26 16:56:22.256289706 +0000 UTC m=+5403.047006854" Nov 26 16:56:27 crc kubenswrapper[5010]: I1126 16:56:27.769100 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:27 crc kubenswrapper[5010]: I1126 16:56:27.769981 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:27 crc kubenswrapper[5010]: I1126 16:56:27.847360 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:28 crc kubenswrapper[5010]: I1126 16:56:28.402829 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:28 crc kubenswrapper[5010]: I1126 16:56:28.485878 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j47wt"] Nov 26 16:56:30 crc kubenswrapper[5010]: I1126 16:56:30.302872 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-j47wt" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="registry-server" containerID="cri-o://757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa" gracePeriod=2 Nov 26 16:56:30 crc kubenswrapper[5010]: I1126 16:56:30.847671 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:30 crc kubenswrapper[5010]: I1126 16:56:30.992093 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9s9r\" (UniqueName: \"kubernetes.io/projected/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-kube-api-access-z9s9r\") pod \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " Nov 26 16:56:30 crc kubenswrapper[5010]: I1126 16:56:30.992236 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-catalog-content\") pod \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " Nov 26 16:56:30 crc kubenswrapper[5010]: I1126 16:56:30.992301 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-utilities\") pod \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\" (UID: \"52c04bb5-84ed-44e6-a6db-c5e05a241d0d\") " Nov 26 16:56:30 crc kubenswrapper[5010]: I1126 16:56:30.993395 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-utilities" (OuterVolumeSpecName: "utilities") pod "52c04bb5-84ed-44e6-a6db-c5e05a241d0d" (UID: "52c04bb5-84ed-44e6-a6db-c5e05a241d0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.004454 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-kube-api-access-z9s9r" (OuterVolumeSpecName: "kube-api-access-z9s9r") pod "52c04bb5-84ed-44e6-a6db-c5e05a241d0d" (UID: "52c04bb5-84ed-44e6-a6db-c5e05a241d0d"). InnerVolumeSpecName "kube-api-access-z9s9r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.010241 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52c04bb5-84ed-44e6-a6db-c5e05a241d0d" (UID: "52c04bb5-84ed-44e6-a6db-c5e05a241d0d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.094758 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9s9r\" (UniqueName: \"kubernetes.io/projected/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-kube-api-access-z9s9r\") on node \"crc\" DevicePath \"\"" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.094802 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.094815 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52c04bb5-84ed-44e6-a6db-c5e05a241d0d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.315297 5010 generic.go:334] "Generic (PLEG): container finished" podID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerID="757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa" exitCode=0 Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.315384 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-j47wt" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.315391 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j47wt" event={"ID":"52c04bb5-84ed-44e6-a6db-c5e05a241d0d","Type":"ContainerDied","Data":"757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa"} Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.315538 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-j47wt" event={"ID":"52c04bb5-84ed-44e6-a6db-c5e05a241d0d","Type":"ContainerDied","Data":"dba15986c9aa62fa9de842784cc7f7df75fb870e35245cd08fad59f056bfcea3"} Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.315559 5010 scope.go:117] "RemoveContainer" containerID="757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.343836 5010 scope.go:117] "RemoveContainer" containerID="74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.356494 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-j47wt"] Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.371945 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-j47wt"] Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.378406 5010 scope.go:117] "RemoveContainer" containerID="1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.416215 5010 scope.go:117] "RemoveContainer" containerID="757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa" Nov 26 16:56:31 crc kubenswrapper[5010]: E1126 16:56:31.416680 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa\": container with ID starting with 757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa not found: ID does not exist" containerID="757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.416750 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa"} err="failed to get container status \"757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa\": rpc error: code = NotFound desc = could not find container \"757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa\": container with ID starting with 757c266f0de142f668a40c760944d7355baafcd9ddaf8baccfb06f65f91f7caa not found: ID does not exist" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.416786 5010 scope.go:117] "RemoveContainer" containerID="74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97" Nov 26 16:56:31 crc kubenswrapper[5010]: E1126 16:56:31.417256 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97\": container with ID starting with 74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97 not found: ID does not exist" containerID="74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.417312 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97"} err="failed to get container status \"74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97\": rpc error: code = NotFound desc = could not find container \"74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97\": container with ID starting with 74ebd92112724d4071c673f23221fa5ad52e265c74b27f0386384048ee5dec97 not found: ID does not exist" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.417351 5010 scope.go:117] "RemoveContainer" containerID="1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3" Nov 26 16:56:31 crc kubenswrapper[5010]: E1126 16:56:31.417720 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3\": container with ID starting with 1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3 not found: ID does not exist" containerID="1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.417754 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3"} err="failed to get container status \"1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3\": rpc error: code = NotFound desc = could not find container \"1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3\": container with ID starting with 1f4a84516d44a2a69591b6de2ed5ab00adfbd1b937e1e30a3ad31e050310e3d3 not found: ID does not exist" Nov 26 16:56:31 crc kubenswrapper[5010]: I1126 16:56:31.900572 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" path="/var/lib/kubelet/pods/52c04bb5-84ed-44e6-a6db-c5e05a241d0d/volumes" Nov 26 16:56:41 crc kubenswrapper[5010]: I1126 16:56:41.423267 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:56:41 crc kubenswrapper[5010]: I1126 16:56:41.424068 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:57:11 crc kubenswrapper[5010]: I1126 16:57:11.423959 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:57:11 crc kubenswrapper[5010]: I1126 16:57:11.424940 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.086975 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 16:57:33 crc kubenswrapper[5010]: E1126 16:57:33.088019 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="registry-server" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.088033 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="registry-server" Nov 26 16:57:33 crc kubenswrapper[5010]: E1126 16:57:33.088048 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="extract-utilities" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.088055 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="extract-utilities" Nov 26 16:57:33 crc kubenswrapper[5010]: E1126 16:57:33.088066 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="extract-content" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.088073 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="extract-content" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.088216 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="52c04bb5-84ed-44e6-a6db-c5e05a241d0d" containerName="registry-server" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.089869 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.102593 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.236855 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-utilities\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.237213 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chv8j\" (UniqueName: \"kubernetes.io/projected/8d2e746f-8a21-453a-b29f-db02c74e06d8-kube-api-access-chv8j\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.237339 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-catalog-content\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.339014 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-chv8j\" (UniqueName: \"kubernetes.io/projected/8d2e746f-8a21-453a-b29f-db02c74e06d8-kube-api-access-chv8j\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.339076 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-catalog-content\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.339126 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-utilities\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.339734 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-utilities\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.339806 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-catalog-content\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.361647 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-chv8j\" (UniqueName: \"kubernetes.io/projected/8d2e746f-8a21-453a-b29f-db02c74e06d8-kube-api-access-chv8j\") pod \"certified-operators-prr7m\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.412269 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.861182 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 16:57:33 crc kubenswrapper[5010]: I1126 16:57:33.943846 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prr7m" event={"ID":"8d2e746f-8a21-453a-b29f-db02c74e06d8","Type":"ContainerStarted","Data":"6f0b785ad007ecfe196950fcc5c3dceb5d90484cd91dd1e9204f83ec7042b865"} Nov 26 16:57:34 crc kubenswrapper[5010]: I1126 16:57:34.954831 5010 generic.go:334] "Generic (PLEG): container finished" podID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerID="3030a9e573d20520dfce3be890de502d41d9416cfc7f85653432ba829427304d" exitCode=0 Nov 26 16:57:34 crc kubenswrapper[5010]: I1126 16:57:34.955014 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prr7m" event={"ID":"8d2e746f-8a21-453a-b29f-db02c74e06d8","Type":"ContainerDied","Data":"3030a9e573d20520dfce3be890de502d41d9416cfc7f85653432ba829427304d"} Nov 26 16:57:40 crc kubenswrapper[5010]: I1126 16:57:40.000770 5010 generic.go:334] "Generic (PLEG): container finished" podID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerID="5155b8a6186109f594e7a6341cfdfa07eb32409b5aaf43a9c08a1bea3cad00dc" exitCode=0 Nov 26 16:57:40 crc kubenswrapper[5010]: I1126 16:57:40.000838 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prr7m" event={"ID":"8d2e746f-8a21-453a-b29f-db02c74e06d8","Type":"ContainerDied","Data":"5155b8a6186109f594e7a6341cfdfa07eb32409b5aaf43a9c08a1bea3cad00dc"} Nov 26 16:57:41 crc kubenswrapper[5010]: I1126 16:57:41.423194 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:57:41 crc kubenswrapper[5010]: I1126 16:57:41.423528 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:57:41 crc kubenswrapper[5010]: I1126 16:57:41.423581 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 16:57:41 crc kubenswrapper[5010]: I1126 16:57:41.424528 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9bc507e475d0d8669f1bff93162119416272f1c73d6cf135f83056541ab8c1ac"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 16:57:41 
crc kubenswrapper[5010]: I1126 16:57:41.424597 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://9bc507e475d0d8669f1bff93162119416272f1c73d6cf135f83056541ab8c1ac" gracePeriod=600 Nov 26 16:57:42 crc kubenswrapper[5010]: I1126 16:57:42.018155 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prr7m" event={"ID":"8d2e746f-8a21-453a-b29f-db02c74e06d8","Type":"ContainerStarted","Data":"4c3aa545b2d5ef30b93075365cc32f724dc2f50ebc1de478ed7eff8ba11435c9"} Nov 26 16:57:42 crc kubenswrapper[5010]: I1126 16:57:42.022109 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="9bc507e475d0d8669f1bff93162119416272f1c73d6cf135f83056541ab8c1ac" exitCode=0 Nov 26 16:57:42 crc kubenswrapper[5010]: I1126 16:57:42.022155 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"9bc507e475d0d8669f1bff93162119416272f1c73d6cf135f83056541ab8c1ac"} Nov 26 16:57:42 crc kubenswrapper[5010]: I1126 16:57:42.022181 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902"} Nov 26 16:57:42 crc kubenswrapper[5010]: I1126 16:57:42.022199 5010 scope.go:117] "RemoveContainer" containerID="6bca0b33f723c07c6394ec98e6d19315dda84235a9db5c5e694215a8e383772c" Nov 26 16:57:42 crc kubenswrapper[5010]: I1126 16:57:42.049862 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-prr7m" podStartSLOduration=3.15416423 podStartE2EDuration="9.049840623s" podCreationTimestamp="2025-11-26 16:57:33 +0000 UTC" firstStartedPulling="2025-11-26 16:57:34.957778497 +0000 UTC m=+5475.748495645" lastFinishedPulling="2025-11-26 16:57:40.85345486 +0000 UTC m=+5481.644172038" observedRunningTime="2025-11-26 16:57:42.046301795 +0000 UTC m=+5482.837018963" watchObservedRunningTime="2025-11-26 16:57:42.049840623 +0000 UTC m=+5482.840557791" Nov 26 16:57:43 crc kubenswrapper[5010]: I1126 16:57:43.412552 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:43 crc kubenswrapper[5010]: I1126 16:57:43.414219 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:43 crc kubenswrapper[5010]: I1126 16:57:43.467956 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:53 crc kubenswrapper[5010]: I1126 16:57:53.456869 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 16:57:53 crc kubenswrapper[5010]: I1126 16:57:53.517941 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 16:57:53 crc kubenswrapper[5010]: I1126 16:57:53.550734 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 
26 16:57:53 crc kubenswrapper[5010]: I1126 16:57:53.551055 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sm5mr" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="registry-server" containerID="cri-o://612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c" gracePeriod=2 Nov 26 16:57:53 crc kubenswrapper[5010]: E1126 16:57:53.692344 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb54acd34_9bcf_4b3d_aea8_9b30e7dd6972.slice/crio-612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb54acd34_9bcf_4b3d_aea8_9b30e7dd6972.slice/crio-conmon-612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c.scope\": RecentStats: unable to find data in memory cache]" Nov 26 16:57:53 crc kubenswrapper[5010]: I1126 16:57:53.996311 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.093772 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9wrm\" (UniqueName: \"kubernetes.io/projected/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-kube-api-access-h9wrm\") pod \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.093843 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-catalog-content\") pod \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.093975 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-utilities\") pod \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\" (UID: \"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972\") " Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.094810 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-utilities" (OuterVolumeSpecName: "utilities") pod "b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" (UID: "b54acd34-9bcf-4b3d-aea8-9b30e7dd6972"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.099409 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-kube-api-access-h9wrm" (OuterVolumeSpecName: "kube-api-access-h9wrm") pod "b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" (UID: "b54acd34-9bcf-4b3d-aea8-9b30e7dd6972"). InnerVolumeSpecName "kube-api-access-h9wrm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.140433 5010 generic.go:334] "Generic (PLEG): container finished" podID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerID="612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c" exitCode=0 Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.140744 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sm5mr" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.141245 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerDied","Data":"612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c"} Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.141280 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sm5mr" event={"ID":"b54acd34-9bcf-4b3d-aea8-9b30e7dd6972","Type":"ContainerDied","Data":"b0dc9cad230e71f295bb573432d7eb28962d03fa7601483df0477cf7155d339c"} Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.141299 5010 scope.go:117] "RemoveContainer" containerID="612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.145201 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" (UID: "b54acd34-9bcf-4b3d-aea8-9b30e7dd6972"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.169070 5010 scope.go:117] "RemoveContainer" containerID="32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.195601 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9wrm\" (UniqueName: \"kubernetes.io/projected/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-kube-api-access-h9wrm\") on node \"crc\" DevicePath \"\"" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.195635 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.195647 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.218551 5010 scope.go:117] "RemoveContainer" containerID="e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.245063 5010 scope.go:117] "RemoveContainer" containerID="612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c" Nov 26 16:57:54 crc kubenswrapper[5010]: E1126 16:57:54.246147 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c\": container with ID starting with 612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c not found: ID does not exist" 
containerID="612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.246202 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c"} err="failed to get container status \"612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c\": rpc error: code = NotFound desc = could not find container \"612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c\": container with ID starting with 612db8b60e984dbc0de4b0d1ba070f5805cb225ae96cd293a94ef9699d0bb71c not found: ID does not exist" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.246239 5010 scope.go:117] "RemoveContainer" containerID="32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8" Nov 26 16:57:54 crc kubenswrapper[5010]: E1126 16:57:54.247379 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8\": container with ID starting with 32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8 not found: ID does not exist" containerID="32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.247497 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8"} err="failed to get container status \"32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8\": rpc error: code = NotFound desc = could not find container \"32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8\": container with ID starting with 32c361658cc805f0ad86252f60494fe2193c402ee7ef8209d502d357e34914f8 not found: ID does not exist" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.247589 5010 scope.go:117] "RemoveContainer" containerID="e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56" Nov 26 16:57:54 crc kubenswrapper[5010]: E1126 16:57:54.248060 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56\": container with ID starting with e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56 not found: ID does not exist" containerID="e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.248157 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56"} err="failed to get container status \"e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56\": rpc error: code = NotFound desc = could not find container \"e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56\": container with ID starting with e641bc24b9a3b33df8c41fa2d01ee169afddb916d2b70066496dae4f79dd8c56 not found: ID does not exist" Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.471167 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 26 16:57:54 crc kubenswrapper[5010]: I1126 16:57:54.477683 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sm5mr"] Nov 26 16:57:55 crc kubenswrapper[5010]: I1126 16:57:55.904830 
5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" path="/var/lib/kubelet/pods/b54acd34-9bcf-4b3d-aea8-9b30e7dd6972/volumes" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.045224 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 16:58:10 crc kubenswrapper[5010]: E1126 16:58:10.046282 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="registry-server" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.046300 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="registry-server" Nov 26 16:58:10 crc kubenswrapper[5010]: E1126 16:58:10.046317 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="extract-utilities" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.046325 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="extract-utilities" Nov 26 16:58:10 crc kubenswrapper[5010]: E1126 16:58:10.046335 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="extract-content" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.046343 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="extract-content" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.046513 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b54acd34-9bcf-4b3d-aea8-9b30e7dd6972" containerName="registry-server" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.047182 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.056466 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-qjbs8" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.067658 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.156215 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.156393 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc5fw\" (UniqueName: \"kubernetes.io/projected/bada47cf-95f0-498b-b0e7-0955fb512714-kube-api-access-qc5fw\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.258471 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc5fw\" (UniqueName: \"kubernetes.io/projected/bada47cf-95f0-498b-b0e7-0955fb512714-kube-api-access-qc5fw\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.258705 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.261981 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.262034 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/682515fbd4ec5bd1caf2964a9c005f3c94f31397d919d4912b63d10da4068cbd/globalmount\"" pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.293474 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc5fw\" (UniqueName: \"kubernetes.io/projected/bada47cf-95f0-498b-b0e7-0955fb512714-kube-api-access-qc5fw\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.322016 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") pod \"mariadb-copy-data\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.365320 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-copy-data" Nov 26 16:58:10 crc kubenswrapper[5010]: I1126 16:58:10.881567 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 16:58:11 crc kubenswrapper[5010]: I1126 16:58:11.331414 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"bada47cf-95f0-498b-b0e7-0955fb512714","Type":"ContainerStarted","Data":"91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368"} Nov 26 16:58:11 crc kubenswrapper[5010]: I1126 16:58:11.331917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"bada47cf-95f0-498b-b0e7-0955fb512714","Type":"ContainerStarted","Data":"62faf63544df5ee062f0a15c11488cc2116a33db64f7924f49c8ad8532bea6ac"} Nov 26 16:58:11 crc kubenswrapper[5010]: I1126 16:58:11.360971 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/mariadb-copy-data" podStartSLOduration=2.360945682 podStartE2EDuration="2.360945682s" podCreationTimestamp="2025-11-26 16:58:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:58:11.356212244 +0000 UTC m=+5512.146929402" watchObservedRunningTime="2025-11-26 16:58:11.360945682 +0000 UTC m=+5512.151662840" Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.419205 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.421097 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.430453 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.533926 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8vgh\" (UniqueName: \"kubernetes.io/projected/690319d1-1c73-44c0-89b9-ad9c32144ae5-kube-api-access-n8vgh\") pod \"mariadb-client\" (UID: \"690319d1-1c73-44c0-89b9-ad9c32144ae5\") " pod="openstack/mariadb-client" Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.635877 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8vgh\" (UniqueName: \"kubernetes.io/projected/690319d1-1c73-44c0-89b9-ad9c32144ae5-kube-api-access-n8vgh\") pod \"mariadb-client\" (UID: \"690319d1-1c73-44c0-89b9-ad9c32144ae5\") " pod="openstack/mariadb-client" Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.668167 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8vgh\" (UniqueName: \"kubernetes.io/projected/690319d1-1c73-44c0-89b9-ad9c32144ae5-kube-api-access-n8vgh\") pod \"mariadb-client\" (UID: \"690319d1-1c73-44c0-89b9-ad9c32144ae5\") " pod="openstack/mariadb-client" Nov 26 16:58:14 crc kubenswrapper[5010]: I1126 16:58:14.741196 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:15 crc kubenswrapper[5010]: I1126 16:58:15.256399 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:15 crc kubenswrapper[5010]: W1126 16:58:15.263270 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod690319d1_1c73_44c0_89b9_ad9c32144ae5.slice/crio-76d6180799af21155d53bde9a2c94d0aed3353e797885322887b5b4d04b0d9d2 WatchSource:0}: Error finding container 76d6180799af21155d53bde9a2c94d0aed3353e797885322887b5b4d04b0d9d2: Status 404 returned error can't find the container with id 76d6180799af21155d53bde9a2c94d0aed3353e797885322887b5b4d04b0d9d2 Nov 26 16:58:15 crc kubenswrapper[5010]: I1126 16:58:15.371354 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"690319d1-1c73-44c0-89b9-ad9c32144ae5","Type":"ContainerStarted","Data":"76d6180799af21155d53bde9a2c94d0aed3353e797885322887b5b4d04b0d9d2"} Nov 26 16:58:16 crc kubenswrapper[5010]: I1126 16:58:16.384632 5010 generic.go:334] "Generic (PLEG): container finished" podID="690319d1-1c73-44c0-89b9-ad9c32144ae5" containerID="5aaff49027cce3d27cf849f5178f4accf08e1dab06d73faef78574d7f3571bef" exitCode=0 Nov 26 16:58:16 crc kubenswrapper[5010]: I1126 16:58:16.384694 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"690319d1-1c73-44c0-89b9-ad9c32144ae5","Type":"ContainerDied","Data":"5aaff49027cce3d27cf849f5178f4accf08e1dab06d73faef78574d7f3571bef"} Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.794778 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.818629 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_690319d1-1c73-44c0-89b9-ad9c32144ae5/mariadb-client/0.log" Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.845013 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.849887 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.896559 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8vgh\" (UniqueName: \"kubernetes.io/projected/690319d1-1c73-44c0-89b9-ad9c32144ae5-kube-api-access-n8vgh\") pod \"690319d1-1c73-44c0-89b9-ad9c32144ae5\" (UID: \"690319d1-1c73-44c0-89b9-ad9c32144ae5\") " Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.901686 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/690319d1-1c73-44c0-89b9-ad9c32144ae5-kube-api-access-n8vgh" (OuterVolumeSpecName: "kube-api-access-n8vgh") pod "690319d1-1c73-44c0-89b9-ad9c32144ae5" (UID: "690319d1-1c73-44c0-89b9-ad9c32144ae5"). InnerVolumeSpecName "kube-api-access-n8vgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:58:17 crc kubenswrapper[5010]: I1126 16:58:17.999129 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8vgh\" (UniqueName: \"kubernetes.io/projected/690319d1-1c73-44c0-89b9-ad9c32144ae5-kube-api-access-n8vgh\") on node \"crc\" DevicePath \"\"" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.037318 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:18 crc kubenswrapper[5010]: E1126 16:58:18.038141 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="690319d1-1c73-44c0-89b9-ad9c32144ae5" containerName="mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.038241 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="690319d1-1c73-44c0-89b9-ad9c32144ae5" containerName="mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.038482 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="690319d1-1c73-44c0-89b9-ad9c32144ae5" containerName="mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.039071 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.047614 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.201940 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qhqk\" (UniqueName: \"kubernetes.io/projected/db492161-35ab-4f80-a358-29bda0822b37-kube-api-access-5qhqk\") pod \"mariadb-client\" (UID: \"db492161-35ab-4f80-a358-29bda0822b37\") " pod="openstack/mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.304258 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qhqk\" (UniqueName: \"kubernetes.io/projected/db492161-35ab-4f80-a358-29bda0822b37-kube-api-access-5qhqk\") pod \"mariadb-client\" (UID: \"db492161-35ab-4f80-a358-29bda0822b37\") " pod="openstack/mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.323583 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qhqk\" (UniqueName: \"kubernetes.io/projected/db492161-35ab-4f80-a358-29bda0822b37-kube-api-access-5qhqk\") pod \"mariadb-client\" (UID: \"db492161-35ab-4f80-a358-29bda0822b37\") " pod="openstack/mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.363973 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.408583 5010 scope.go:117] "RemoveContainer" containerID="5aaff49027cce3d27cf849f5178f4accf08e1dab06d73faef78574d7f3571bef" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.408674 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:18 crc kubenswrapper[5010]: I1126 16:58:18.838851 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:18 crc kubenswrapper[5010]: W1126 16:58:18.844227 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb492161_35ab_4f80_a358_29bda0822b37.slice/crio-6b9c0826340dd4f5bfc1cc1066e290375b5443456a47384b67369e52f45c33aa WatchSource:0}: Error finding container 6b9c0826340dd4f5bfc1cc1066e290375b5443456a47384b67369e52f45c33aa: Status 404 returned error can't find the container with id 6b9c0826340dd4f5bfc1cc1066e290375b5443456a47384b67369e52f45c33aa Nov 26 16:58:19 crc kubenswrapper[5010]: I1126 16:58:19.422337 5010 generic.go:334] "Generic (PLEG): container finished" podID="db492161-35ab-4f80-a358-29bda0822b37" containerID="507ccf248c0ca3a91f14c401102529f83801842c5b0f81464d0c00c40ffa8b3d" exitCode=0 Nov 26 16:58:19 crc kubenswrapper[5010]: I1126 16:58:19.422385 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"db492161-35ab-4f80-a358-29bda0822b37","Type":"ContainerDied","Data":"507ccf248c0ca3a91f14c401102529f83801842c5b0f81464d0c00c40ffa8b3d"} Nov 26 16:58:19 crc kubenswrapper[5010]: I1126 16:58:19.422408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-client" event={"ID":"db492161-35ab-4f80-a358-29bda0822b37","Type":"ContainerStarted","Data":"6b9c0826340dd4f5bfc1cc1066e290375b5443456a47384b67369e52f45c33aa"} Nov 26 16:58:19 crc kubenswrapper[5010]: I1126 16:58:19.907151 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="690319d1-1c73-44c0-89b9-ad9c32144ae5" path="/var/lib/kubelet/pods/690319d1-1c73-44c0-89b9-ad9c32144ae5/volumes" Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.748834 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.768473 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_mariadb-client_db492161-35ab-4f80-a358-29bda0822b37/mariadb-client/0.log" Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.804443 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.813699 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-client"] Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.848469 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qhqk\" (UniqueName: \"kubernetes.io/projected/db492161-35ab-4f80-a358-29bda0822b37-kube-api-access-5qhqk\") pod \"db492161-35ab-4f80-a358-29bda0822b37\" (UID: \"db492161-35ab-4f80-a358-29bda0822b37\") " Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.855253 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db492161-35ab-4f80-a358-29bda0822b37-kube-api-access-5qhqk" (OuterVolumeSpecName: "kube-api-access-5qhqk") pod "db492161-35ab-4f80-a358-29bda0822b37" (UID: "db492161-35ab-4f80-a358-29bda0822b37"). InnerVolumeSpecName "kube-api-access-5qhqk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:58:20 crc kubenswrapper[5010]: I1126 16:58:20.951047 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qhqk\" (UniqueName: \"kubernetes.io/projected/db492161-35ab-4f80-a358-29bda0822b37-kube-api-access-5qhqk\") on node \"crc\" DevicePath \"\"" Nov 26 16:58:21 crc kubenswrapper[5010]: I1126 16:58:21.443835 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b9c0826340dd4f5bfc1cc1066e290375b5443456a47384b67369e52f45c33aa" Nov 26 16:58:21 crc kubenswrapper[5010]: I1126 16:58:21.443936 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/mariadb-client" Nov 26 16:58:21 crc kubenswrapper[5010]: I1126 16:58:21.903755 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db492161-35ab-4f80-a358-29bda0822b37" path="/var/lib/kubelet/pods/db492161-35ab-4f80-a358-29bda0822b37/volumes" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.533139 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 16:58:58 crc kubenswrapper[5010]: E1126 16:58:58.534149 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db492161-35ab-4f80-a358-29bda0822b37" containerName="mariadb-client" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.534167 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="db492161-35ab-4f80-a358-29bda0822b37" containerName="mariadb-client" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.534385 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="db492161-35ab-4f80-a358-29bda0822b37" containerName="mariadb-client" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.535448 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.537975 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-mpc4k" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.538433 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.539001 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.553934 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.554316 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.600135 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.612869 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.615006 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.620176 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.622508 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.636615 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.644008 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.657868 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.657909 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ffb280d-8fa7-48c5-9407-42a21ac5b021-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.657933 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.658125 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8ffb280d-8fa7-48c5-9407-42a21ac5b021-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.658264 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9h5b9\" (UniqueName: \"kubernetes.io/projected/8ffb280d-8fa7-48c5-9407-42a21ac5b021-kube-api-access-9h5b9\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.658407 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ffb280d-8fa7-48c5-9407-42a21ac5b021-config\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.658442 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.658479 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 
16:58:58.760543 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ffb280d-8fa7-48c5-9407-42a21ac5b021-config\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.760613 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.760640 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.760691 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.760743 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c1b878d-4a1d-4d47-b4cf-b366607c8631-config\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.760785 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.760999 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ffb280d-8fa7-48c5-9407-42a21ac5b021-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761030 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761083 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761136 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761158 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-config\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761676 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761763 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8ffb280d-8fa7-48c5-9407-42a21ac5b021-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761830 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c1b878d-4a1d-4d47-b4cf-b366607c8631-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761854 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhz8b\" (UniqueName: \"kubernetes.io/projected/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-kube-api-access-jhz8b\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761898 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761947 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.761972 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c1b878d-4a1d-4d47-b4cf-b366607c8631-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762017 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762037 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4cfm\" (UniqueName: \"kubernetes.io/projected/0c1b878d-4a1d-4d47-b4cf-b366607c8631-kube-api-access-q4cfm\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762062 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762102 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9h5b9\" (UniqueName: \"kubernetes.io/projected/8ffb280d-8fa7-48c5-9407-42a21ac5b021-kube-api-access-9h5b9\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762153 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762181 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762470 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8ffb280d-8fa7-48c5-9407-42a21ac5b021-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.762969 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ffb280d-8fa7-48c5-9407-42a21ac5b021-config\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.763169 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8ffb280d-8fa7-48c5-9407-42a21ac5b021-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.763294 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.763326 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/b2666f159ad887bbe10f5e358f04c1eaa921d22db9d839f91a75de4b80abbe87/globalmount\"" pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.767760 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.768330 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.774491 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8ffb280d-8fa7-48c5-9407-42a21ac5b021-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.786695 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9h5b9\" (UniqueName: \"kubernetes.io/projected/8ffb280d-8fa7-48c5-9407-42a21ac5b021-kube-api-access-9h5b9\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.796499 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-e36a2a8d-98ca-4882-861d-dc03dff8ccdb\") pod \"ovsdbserver-nb-0\" (UID: \"8ffb280d-8fa7-48c5-9407-42a21ac5b021\") " pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863233 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c1b878d-4a1d-4d47-b4cf-b366607c8631-config\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863305 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863332 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " 
pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863350 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-config\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863374 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863402 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c1b878d-4a1d-4d47-b4cf-b366607c8631-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863422 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhz8b\" (UniqueName: \"kubernetes.io/projected/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-kube-api-access-jhz8b\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863438 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863461 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863478 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c1b878d-4a1d-4d47-b4cf-b366607c8631-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863502 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863517 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4cfm\" (UniqueName: \"kubernetes.io/projected/0c1b878d-4a1d-4d47-b4cf-b366607c8631-kube-api-access-q4cfm\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863532 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863553 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863573 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.863610 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.864252 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c1b878d-4a1d-4d47-b4cf-b366607c8631-config\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.864429 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-ovsdb-rundir\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.864831 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0c1b878d-4a1d-4d47-b4cf-b366607c8631-ovsdb-rundir\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.865572 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-scripts\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.865626 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0c1b878d-4a1d-4d47-b4cf-b366607c8631-scripts\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.865893 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-config\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.867065 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.867260 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.867296 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/3601caf8a896bae963f3a5688cc9145a9aff94d176a407284b02a62ea163438c/globalmount\"" pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.867382 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.867427 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/f015f4e39e35ac4dd961d56c5925627a39cf240b9a918fbc34561676aff8dd67/globalmount\"" pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.867884 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.869587 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.869980 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-combined-ca-bundle\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.873052 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-combined-ca-bundle\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.876518 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0c1b878d-4a1d-4d47-b4cf-b366607c8631-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-2\" (UID: 
\"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.888471 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhz8b\" (UniqueName: \"kubernetes.io/projected/891a879a-7cb6-44cc-ac0a-05656b5a0ed0-kube-api-access-jhz8b\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.893358 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4cfm\" (UniqueName: \"kubernetes.io/projected/0c1b878d-4a1d-4d47-b4cf-b366607c8631-kube-api-access-q4cfm\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.894339 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.895311 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-cea941be-9eb5-4e4c-bcea-7966d50dc0ab\") pod \"ovsdbserver-nb-2\" (UID: \"0c1b878d-4a1d-4d47-b4cf-b366607c8631\") " pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.900997 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-7bfc4bf5-d5d7-4755-9d48-7b73dae133bb\") pod \"ovsdbserver-nb-1\" (UID: \"891a879a-7cb6-44cc-ac0a-05656b5a0ed0\") " pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.935454 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-2" Nov 26 16:58:58 crc kubenswrapper[5010]: I1126 16:58:58.957370 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-1" Nov 26 16:58:59 crc kubenswrapper[5010]: I1126 16:58:59.511977 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 26 16:58:59 crc kubenswrapper[5010]: I1126 16:58:59.602872 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-1"] Nov 26 16:58:59 crc kubenswrapper[5010]: W1126 16:58:59.605051 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod891a879a_7cb6_44cc_ac0a_05656b5a0ed0.slice/crio-d2a0233e36986766b8f0a5c5fe9e58cb473f1df2ada06c0b8aa08a1d7c769913 WatchSource:0}: Error finding container d2a0233e36986766b8f0a5c5fe9e58cb473f1df2ada06c0b8aa08a1d7c769913: Status 404 returned error can't find the container with id d2a0233e36986766b8f0a5c5fe9e58cb473f1df2ada06c0b8aa08a1d7c769913 Nov 26 16:58:59 crc kubenswrapper[5010]: I1126 16:58:59.805119 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8ffb280d-8fa7-48c5-9407-42a21ac5b021","Type":"ContainerStarted","Data":"72e7e6cd050eac3b384720b37518dc3d6cdb28924e959afcc905e3de5c4b96d6"} Nov 26 16:58:59 crc kubenswrapper[5010]: I1126 16:58:59.805509 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8ffb280d-8fa7-48c5-9407-42a21ac5b021","Type":"ContainerStarted","Data":"6b509f60bba4e586ae5b16cb300f837bde3263d0a10173f2852786d49140361d"} Nov 26 16:58:59 crc kubenswrapper[5010]: I1126 16:58:59.812348 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"891a879a-7cb6-44cc-ac0a-05656b5a0ed0","Type":"ContainerStarted","Data":"0a41c24180d5188c5f7d2e6fed33805028d4ef9ac35003a1aa8f37ef4992b764"} Nov 26 16:58:59 crc kubenswrapper[5010]: I1126 16:58:59.812411 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"891a879a-7cb6-44cc-ac0a-05656b5a0ed0","Type":"ContainerStarted","Data":"d2a0233e36986766b8f0a5c5fe9e58cb473f1df2ada06c0b8aa08a1d7c769913"} Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.226000 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-2"] Nov 26 16:59:00 crc kubenswrapper[5010]: W1126 16:59:00.227642 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0c1b878d_4a1d_4d47_b4cf_b366607c8631.slice/crio-6879f3557be37bdffff444efe4c686bfa4e59674c471a0ad635c6a2e9e871d69 WatchSource:0}: Error finding container 6879f3557be37bdffff444efe4c686bfa4e59674c471a0ad635c6a2e9e871d69: Status 404 returned error can't find the container with id 6879f3557be37bdffff444efe4c686bfa4e59674c471a0ad635c6a2e9e871d69 Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.385134 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.387153 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.390631 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.390704 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.391230 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-f84q4" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.391576 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.409679 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.433649 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.435289 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.444390 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.454038 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.457118 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.486939 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.487851 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.487898 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.487962 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8nr8\" (UniqueName: \"kubernetes.io/projected/7ecc2ae9-7598-47f5-a481-967ef5353ff4-kube-api-access-w8nr8\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.487998 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-config\") pod 
\"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.488031 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wx67b\" (UniqueName: \"kubernetes.io/projected/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-kube-api-access-wx67b\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.488075 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7ecc2ae9-7598-47f5-a481-967ef5353ff4-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.488207 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.488270 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.488317 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.488355 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.489457 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.489551 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.489626 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ecc2ae9-7598-47f5-a481-967ef5353ff4-scripts\") pod 
\"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.490515 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ecc2ae9-7598-47f5-a481-967ef5353ff4-config\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.490569 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.497665 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593803 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593855 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593883 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ecc2ae9-7598-47f5-a481-967ef5353ff4-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593917 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ecc2ae9-7598-47f5-a481-967ef5353ff4-config\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593934 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593951 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593966 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: 
\"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.593981 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594008 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594028 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51e8e82f-9164-482c-a05e-556960a05d88-config\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594050 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8nr8\" (UniqueName: \"kubernetes.io/projected/7ecc2ae9-7598-47f5-a481-967ef5353ff4-kube-api-access-w8nr8\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594067 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wx67b\" (UniqueName: \"kubernetes.io/projected/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-kube-api-access-wx67b\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-config\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594103 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7ecc2ae9-7598-47f5-a481-967ef5353ff4-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594125 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51e8e82f-9164-482c-a05e-556960a05d88-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594145 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594168 
5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594185 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgzd7\" (UniqueName: \"kubernetes.io/projected/51e8e82f-9164-482c-a05e-556960a05d88-kube-api-access-xgzd7\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594207 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51e8e82f-9164-482c-a05e-556960a05d88-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594222 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594238 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594270 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594291 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.594311 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.595000 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/7ecc2ae9-7598-47f5-a481-967ef5353ff4-ovsdb-rundir\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.595475 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-config\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.596161 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.596655 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ecc2ae9-7598-47f5-a481-967ef5353ff4-config\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.596769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.596943 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7ecc2ae9-7598-47f5-a481-967ef5353ff4-scripts\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.598110 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.598109 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.599390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.599435 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-combined-ca-bundle\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.600204 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.600290 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME 
capability not set. Skipping MountDevice... Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.600313 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.600336 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/068cd0271ee2f9b154a973cc639f67601148af81ad8915d6f4825e1b31a5abad/globalmount\"" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.600314 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/adf66d402c8d3e15d8c2fb2abdcbe5e0bb9c440f834fa6913ed385476330025f/globalmount\"" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.604199 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7ecc2ae9-7598-47f5-a481-967ef5353ff4-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.617968 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8nr8\" (UniqueName: \"kubernetes.io/projected/7ecc2ae9-7598-47f5-a481-967ef5353ff4-kube-api-access-w8nr8\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.620376 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wx67b\" (UniqueName: \"kubernetes.io/projected/b1f50f96-b58f-4e55-ae75-3324dd5cdc76-kube-api-access-wx67b\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.632365 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d475e0f0-fe8a-478e-8c40-d601eea347cd\") pod \"ovsdbserver-sb-0\" (UID: \"b1f50f96-b58f-4e55-ae75-3324dd5cdc76\") " pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.644716 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-ac0321ff-7f3f-412a-96e6-dd41fd2ecd28\") pod \"ovsdbserver-sb-2\" (UID: \"7ecc2ae9-7598-47f5-a481-967ef5353ff4\") " pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695355 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695408 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51e8e82f-9164-482c-a05e-556960a05d88-config\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695453 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51e8e82f-9164-482c-a05e-556960a05d88-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695478 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695505 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgzd7\" (UniqueName: \"kubernetes.io/projected/51e8e82f-9164-482c-a05e-556960a05d88-kube-api-access-xgzd7\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695528 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51e8e82f-9164-482c-a05e-556960a05d88-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695544 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.695589 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.696618 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51e8e82f-9164-482c-a05e-556960a05d88-ovsdb-rundir\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.697255 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51e8e82f-9164-482c-a05e-556960a05d88-config\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.697676 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51e8e82f-9164-482c-a05e-556960a05d88-scripts\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.697997 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.698033 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/35c3b1ec80592e64b4e6411c3d2703b5e8e2429a6360070a314dd0449959f60e/globalmount\"" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.699750 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.701258 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.701567 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51e8e82f-9164-482c-a05e-556960a05d88-combined-ca-bundle\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.714703 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.715490 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgzd7\" (UniqueName: \"kubernetes.io/projected/51e8e82f-9164-482c-a05e-556960a05d88-kube-api-access-xgzd7\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.730029 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-2476c2f5-4039-455f-a1a4-eb1eb0bc0405\") pod \"ovsdbserver-sb-1\" (UID: \"51e8e82f-9164-482c-a05e-556960a05d88\") " pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.752600 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.792339 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.835840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8ffb280d-8fa7-48c5-9407-42a21ac5b021","Type":"ContainerStarted","Data":"9268decde4f6859ee294f46755568d53212f986fbd5cd50530b265501f3cae64"} Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.840476 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"0c1b878d-4a1d-4d47-b4cf-b366607c8631","Type":"ContainerStarted","Data":"faf590acdd492d4d275dd9100a7442dce5e3315a402d690ada0aaee51c8d0ddb"} Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.840517 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"0c1b878d-4a1d-4d47-b4cf-b366607c8631","Type":"ContainerStarted","Data":"42cf9bd478d4e44fba67b1bd38b413f7f100a4a7421c05c64f9fe3c544359ec4"} Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.840527 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-2" event={"ID":"0c1b878d-4a1d-4d47-b4cf-b366607c8631","Type":"ContainerStarted","Data":"6879f3557be37bdffff444efe4c686bfa4e59674c471a0ad635c6a2e9e871d69"} Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.843991 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-1" event={"ID":"891a879a-7cb6-44cc-ac0a-05656b5a0ed0","Type":"ContainerStarted","Data":"30c52234840b002b6ff9b7235867c34f40b20af2943c14a52031601b4a3ec672"} Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.887215 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=3.887187751 podStartE2EDuration="3.887187751s" podCreationTimestamp="2025-11-26 16:58:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:00.85497971 +0000 UTC m=+5561.645696878" watchObservedRunningTime="2025-11-26 16:59:00.887187751 +0000 UTC m=+5561.677904899" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.892649 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-1" podStartSLOduration=3.892630637 podStartE2EDuration="3.892630637s" podCreationTimestamp="2025-11-26 16:58:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:00.872763292 +0000 UTC m=+5561.663480460" watchObservedRunningTime="2025-11-26 16:59:00.892630637 +0000 UTC m=+5561.683347795" Nov 26 16:59:00 crc kubenswrapper[5010]: I1126 16:59:00.913495 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-2" podStartSLOduration=3.913477266 podStartE2EDuration="3.913477266s" podCreationTimestamp="2025-11-26 16:58:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:00.900058802 +0000 UTC m=+5561.690775950" watchObservedRunningTime="2025-11-26 16:59:00.913477266 +0000 UTC m=+5561.704194414" Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.220687 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.328468 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-2"] Nov 
26 16:59:01 crc kubenswrapper[5010]: W1126 16:59:01.338045 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ecc2ae9_7598_47f5_a481_967ef5353ff4.slice/crio-cb91b8a68315c5d4fbe603765d8bd541942e24b5c25a46dd8fe7bdbb27400f5b WatchSource:0}: Error finding container cb91b8a68315c5d4fbe603765d8bd541942e24b5c25a46dd8fe7bdbb27400f5b: Status 404 returned error can't find the container with id cb91b8a68315c5d4fbe603765d8bd541942e24b5c25a46dd8fe7bdbb27400f5b Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.427613 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-1"] Nov 26 16:59:01 crc kubenswrapper[5010]: W1126 16:59:01.440116 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51e8e82f_9164_482c_a05e_556960a05d88.slice/crio-deb76cc4909d33970ba302991f3541546ff35e94baa2ab7acc120f493ed053ac WatchSource:0}: Error finding container deb76cc4909d33970ba302991f3541546ff35e94baa2ab7acc120f493ed053ac: Status 404 returned error can't find the container with id deb76cc4909d33970ba302991f3541546ff35e94baa2ab7acc120f493ed053ac Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.853859 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"7ecc2ae9-7598-47f5-a481-967ef5353ff4","Type":"ContainerStarted","Data":"260d64dbc7bb6ceea26d2058b2bae3266e9211e76fa453b9e582f928aec6407e"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.854267 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"7ecc2ae9-7598-47f5-a481-967ef5353ff4","Type":"ContainerStarted","Data":"d25ceb095bdb34b8bad79c08b08d36fce6467d83f14046d5990ed17964ddf0cc"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.854285 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-2" event={"ID":"7ecc2ae9-7598-47f5-a481-967ef5353ff4","Type":"ContainerStarted","Data":"cb91b8a68315c5d4fbe603765d8bd541942e24b5c25a46dd8fe7bdbb27400f5b"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.855912 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"51e8e82f-9164-482c-a05e-556960a05d88","Type":"ContainerStarted","Data":"76b4283eaf9f2ff19fe86662edce86021e32f64f7b0c562022725a5d132b2d44"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.855945 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"51e8e82f-9164-482c-a05e-556960a05d88","Type":"ContainerStarted","Data":"d068058c95786f225e6371003ac0b7241bf09ea75479c7b54395e343ebaf29b6"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.855957 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-1" event={"ID":"51e8e82f-9164-482c-a05e-556960a05d88","Type":"ContainerStarted","Data":"deb76cc4909d33970ba302991f3541546ff35e94baa2ab7acc120f493ed053ac"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.857672 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b1f50f96-b58f-4e55-ae75-3324dd5cdc76","Type":"ContainerStarted","Data":"4a77db50ad470bcd8fdceb8b77d776b6d14366fd014e9ada932a353b7937d5dc"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.857738 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"b1f50f96-b58f-4e55-ae75-3324dd5cdc76","Type":"ContainerStarted","Data":"a81b29fbeb9a7608a9306c0da0257d3d938e70042773dd6b7f689d20e21c83a9"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.857758 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"b1f50f96-b58f-4e55-ae75-3324dd5cdc76","Type":"ContainerStarted","Data":"da8a50fd4d137a43ff15a48c4738159ee9beacfb80edb4ab5df1e29ce56a5a48"} Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.877903 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-2" podStartSLOduration=2.877864034 podStartE2EDuration="2.877864034s" podCreationTimestamp="2025-11-26 16:58:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:01.87609833 +0000 UTC m=+5562.666815498" watchObservedRunningTime="2025-11-26 16:59:01.877864034 +0000 UTC m=+5562.668581192" Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.899097 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=2.899077901 podStartE2EDuration="2.899077901s" podCreationTimestamp="2025-11-26 16:58:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:01.897937633 +0000 UTC m=+5562.688654781" watchObservedRunningTime="2025-11-26 16:59:01.899077901 +0000 UTC m=+5562.689795049" Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.901926 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.919927 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-1" podStartSLOduration=2.9199129 podStartE2EDuration="2.9199129s" podCreationTimestamp="2025-11-26 16:58:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:01.9155087 +0000 UTC m=+5562.706225868" watchObservedRunningTime="2025-11-26 16:59:01.9199129 +0000 UTC m=+5562.710630048" Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.935557 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-2" Nov 26 16:59:01 crc kubenswrapper[5010]: I1126 16:59:01.957945 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-1" Nov 26 16:59:03 crc kubenswrapper[5010]: I1126 16:59:03.715056 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:03 crc kubenswrapper[5010]: I1126 16:59:03.753200 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:03 crc kubenswrapper[5010]: I1126 16:59:03.793266 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:03 crc kubenswrapper[5010]: I1126 16:59:03.907669 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 26 16:59:03 crc kubenswrapper[5010]: I1126 16:59:03.936396 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-2" Nov 26 16:59:03 crc kubenswrapper[5010]: I1126 16:59:03.957766 5010 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-1" Nov 26 16:59:04 crc kubenswrapper[5010]: I1126 16:59:04.945261 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.004576 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-2" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.008667 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.016076 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-1" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.088396 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-1" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.101535 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-2" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.266749 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5978c5cdf7-qv4tq"] Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.269670 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.274860 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.285484 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5978c5cdf7-qv4tq"] Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.291655 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-dns-svc\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.291708 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-ovsdbserver-nb\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.291775 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-config\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.291806 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c96dg\" (UniqueName: \"kubernetes.io/projected/a5a83894-6323-4a43-b5f0-10fb7203c32b-kube-api-access-c96dg\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.393478 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-dns-svc\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.393520 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-ovsdbserver-nb\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.393551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-config\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.393574 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c96dg\" (UniqueName: \"kubernetes.io/projected/a5a83894-6323-4a43-b5f0-10fb7203c32b-kube-api-access-c96dg\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.394661 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-dns-svc\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.394806 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-ovsdbserver-nb\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.395218 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-config\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.428351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c96dg\" (UniqueName: \"kubernetes.io/projected/a5a83894-6323-4a43-b5f0-10fb7203c32b-kube-api-access-c96dg\") pod \"dnsmasq-dns-5978c5cdf7-qv4tq\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.649483 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.715216 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.752934 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.793602 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:05 crc kubenswrapper[5010]: I1126 16:59:05.936675 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5978c5cdf7-qv4tq"] Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.759095 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.809734 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.823697 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.843780 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.858451 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-2" Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.899620 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-1" Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.922562 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerID="685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312" exitCode=0 Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.924012 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" event={"ID":"a5a83894-6323-4a43-b5f0-10fb7203c32b","Type":"ContainerDied","Data":"685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312"} Nov 26 16:59:06 crc kubenswrapper[5010]: I1126 16:59:06.924059 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" event={"ID":"a5a83894-6323-4a43-b5f0-10fb7203c32b","Type":"ContainerStarted","Data":"059300c480a43a10c83cc2c7105827228399f2e883be42e6251d46bac06fd05c"} Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.071010 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5978c5cdf7-qv4tq"] Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.098451 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cc6b56df5-xvrlq"] Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.100078 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.107164 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.117761 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cc6b56df5-xvrlq"] Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.136529 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs9xg\" (UniqueName: \"kubernetes.io/projected/40287857-f766-4747-a89a-598b28347738-kube-api-access-rs9xg\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.136579 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-dns-svc\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.136651 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-sb\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.136677 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-nb\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.136744 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-config\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.238157 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-config\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.238216 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs9xg\" (UniqueName: \"kubernetes.io/projected/40287857-f766-4747-a89a-598b28347738-kube-api-access-rs9xg\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.238245 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-dns-svc\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " 
pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.238349 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-sb\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.238376 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-nb\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.239456 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-config\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.239855 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-sb\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.240007 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-dns-svc\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.240126 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-nb\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.260991 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs9xg\" (UniqueName: \"kubernetes.io/projected/40287857-f766-4747-a89a-598b28347738-kube-api-access-rs9xg\") pod \"dnsmasq-dns-6cc6b56df5-xvrlq\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.436540 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:07 crc kubenswrapper[5010]: W1126 16:59:07.902571 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod40287857_f766_4747_a89a_598b28347738.slice/crio-b34b883f044b24380ecdcc316a6425ee58d75e6bd1d87736e76aa597ff549150 WatchSource:0}: Error finding container b34b883f044b24380ecdcc316a6425ee58d75e6bd1d87736e76aa597ff549150: Status 404 returned error can't find the container with id b34b883f044b24380ecdcc316a6425ee58d75e6bd1d87736e76aa597ff549150 Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.902860 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cc6b56df5-xvrlq"] Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.933599 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" event={"ID":"40287857-f766-4747-a89a-598b28347738","Type":"ContainerStarted","Data":"b34b883f044b24380ecdcc316a6425ee58d75e6bd1d87736e76aa597ff549150"} Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.937533 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" event={"ID":"a5a83894-6323-4a43-b5f0-10fb7203c32b","Type":"ContainerStarted","Data":"63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80"} Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.937755 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerName="dnsmasq-dns" containerID="cri-o://63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80" gracePeriod=10 Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.939060 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:07 crc kubenswrapper[5010]: I1126 16:59:07.961733 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" podStartSLOduration=2.961699071 podStartE2EDuration="2.961699071s" podCreationTimestamp="2025-11-26 16:59:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:07.955366573 +0000 UTC m=+5568.746083731" watchObservedRunningTime="2025-11-26 16:59:07.961699071 +0000 UTC m=+5568.752416219" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.355374 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.457618 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-ovsdbserver-nb\") pod \"a5a83894-6323-4a43-b5f0-10fb7203c32b\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.457661 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c96dg\" (UniqueName: \"kubernetes.io/projected/a5a83894-6323-4a43-b5f0-10fb7203c32b-kube-api-access-c96dg\") pod \"a5a83894-6323-4a43-b5f0-10fb7203c32b\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.457727 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-config\") pod \"a5a83894-6323-4a43-b5f0-10fb7203c32b\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.457907 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-dns-svc\") pod \"a5a83894-6323-4a43-b5f0-10fb7203c32b\" (UID: \"a5a83894-6323-4a43-b5f0-10fb7203c32b\") " Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.463447 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5a83894-6323-4a43-b5f0-10fb7203c32b-kube-api-access-c96dg" (OuterVolumeSpecName: "kube-api-access-c96dg") pod "a5a83894-6323-4a43-b5f0-10fb7203c32b" (UID: "a5a83894-6323-4a43-b5f0-10fb7203c32b"). InnerVolumeSpecName "kube-api-access-c96dg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.524074 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a5a83894-6323-4a43-b5f0-10fb7203c32b" (UID: "a5a83894-6323-4a43-b5f0-10fb7203c32b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.525018 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a5a83894-6323-4a43-b5f0-10fb7203c32b" (UID: "a5a83894-6323-4a43-b5f0-10fb7203c32b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.546285 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-config" (OuterVolumeSpecName: "config") pod "a5a83894-6323-4a43-b5f0-10fb7203c32b" (UID: "a5a83894-6323-4a43-b5f0-10fb7203c32b"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.560766 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.560796 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.560827 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c96dg\" (UniqueName: \"kubernetes.io/projected/a5a83894-6323-4a43-b5f0-10fb7203c32b-kube-api-access-c96dg\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.560837 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a5a83894-6323-4a43-b5f0-10fb7203c32b-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.947956 5010 generic.go:334] "Generic (PLEG): container finished" podID="40287857-f766-4747-a89a-598b28347738" containerID="fa9c10c7470155c8b44c47d08fcc86bbbec9230780d9a4938a8279298f88d521" exitCode=0 Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.948024 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" event={"ID":"40287857-f766-4747-a89a-598b28347738","Type":"ContainerDied","Data":"fa9c10c7470155c8b44c47d08fcc86bbbec9230780d9a4938a8279298f88d521"} Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.952652 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerID="63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80" exitCode=0 Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.952746 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" event={"ID":"a5a83894-6323-4a43-b5f0-10fb7203c32b","Type":"ContainerDied","Data":"63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80"} Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.953095 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" event={"ID":"a5a83894-6323-4a43-b5f0-10fb7203c32b","Type":"ContainerDied","Data":"059300c480a43a10c83cc2c7105827228399f2e883be42e6251d46bac06fd05c"} Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.953115 5010 scope.go:117] "RemoveContainer" containerID="63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80" Nov 26 16:59:08 crc kubenswrapper[5010]: I1126 16:59:08.952829 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5978c5cdf7-qv4tq" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.048814 5010 scope.go:117] "RemoveContainer" containerID="685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.077180 5010 scope.go:117] "RemoveContainer" containerID="63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80" Nov 26 16:59:09 crc kubenswrapper[5010]: E1126 16:59:09.077943 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80\": container with ID starting with 63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80 not found: ID does not exist" containerID="63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.077982 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80"} err="failed to get container status \"63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80\": rpc error: code = NotFound desc = could not find container \"63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80\": container with ID starting with 63ca063a2a0f6c5b0c8d000dc3a6d383645a0f78cdd90d60eb5f5a4ebdc2aa80 not found: ID does not exist" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.078011 5010 scope.go:117] "RemoveContainer" containerID="685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312" Nov 26 16:59:09 crc kubenswrapper[5010]: E1126 16:59:09.078381 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312\": container with ID starting with 685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312 not found: ID does not exist" containerID="685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.078446 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312"} err="failed to get container status \"685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312\": rpc error: code = NotFound desc = could not find container \"685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312\": container with ID starting with 685ebb4068c8918de728a8dd844940c243064f719eeb796e9dd99e5ad8a8d312 not found: ID does not exist" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.079787 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5978c5cdf7-qv4tq"] Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.085824 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5978c5cdf7-qv4tq"] Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.486446 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-copy-data"] Nov 26 16:59:09 crc kubenswrapper[5010]: E1126 16:59:09.486856 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerName="init" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.486872 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" 
containerName="init" Nov 26 16:59:09 crc kubenswrapper[5010]: E1126 16:59:09.486892 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerName="dnsmasq-dns" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.486900 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerName="dnsmasq-dns" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.487113 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" containerName="dnsmasq-dns" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.487812 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.490170 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovn-data-cert" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.505312 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.595261 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d0a3cda8-d08e-45d1-865c-208d947680ce-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.595343 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkxpr\" (UniqueName: \"kubernetes.io/projected/d0a3cda8-d08e-45d1-865c-208d947680ce-kube-api-access-fkxpr\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.595366 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-d120f006-30d3-481f-a395-7f76aa95832a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.697848 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d0a3cda8-d08e-45d1-865c-208d947680ce-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.698024 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkxpr\" (UniqueName: \"kubernetes.io/projected/d0a3cda8-d08e-45d1-865c-208d947680ce-kube-api-access-fkxpr\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.698072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-d120f006-30d3-481f-a395-7f76aa95832a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.701181 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME 
capability not set. Skipping MountDevice... Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.701226 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-d120f006-30d3-481f-a395-7f76aa95832a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/eb4c2a5b1377143f3c96c69910cfaae883205bbf53f1ba22300f054edaafb6f6/globalmount\"" pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.702084 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d0a3cda8-d08e-45d1-865c-208d947680ce-ovn-data-cert\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.734030 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkxpr\" (UniqueName: \"kubernetes.io/projected/d0a3cda8-d08e-45d1-865c-208d947680ce-kube-api-access-fkxpr\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.740086 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-d120f006-30d3-481f-a395-7f76aa95832a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") pod \"ovn-copy-data\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.830207 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.907413 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5a83894-6323-4a43-b5f0-10fb7203c32b" path="/var/lib/kubelet/pods/a5a83894-6323-4a43-b5f0-10fb7203c32b/volumes" Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.990472 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" event={"ID":"40287857-f766-4747-a89a-598b28347738","Type":"ContainerStarted","Data":"6974d7e0aa3191782462b813b3df96d59ca21913ae72c1f586e286dcf61c1e32"} Nov 26 16:59:09 crc kubenswrapper[5010]: I1126 16:59:09.992488 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:10 crc kubenswrapper[5010]: I1126 16:59:10.140836 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" podStartSLOduration=3.140799937 podStartE2EDuration="3.140799937s" podCreationTimestamp="2025-11-26 16:59:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:10.015505529 +0000 UTC m=+5570.806222687" watchObservedRunningTime="2025-11-26 16:59:10.140799937 +0000 UTC m=+5570.931517095" Nov 26 16:59:10 crc kubenswrapper[5010]: I1126 16:59:10.144946 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 16:59:10 crc kubenswrapper[5010]: W1126 16:59:10.145139 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0a3cda8_d08e_45d1_865c_208d947680ce.slice/crio-e109c570543ee92c7d62b0fb40661ec30e33c90bc660bf3c735c549b4e8c2d55 WatchSource:0}: Error finding container e109c570543ee92c7d62b0fb40661ec30e33c90bc660bf3c735c549b4e8c2d55: Status 404 returned error can't find the container with id e109c570543ee92c7d62b0fb40661ec30e33c90bc660bf3c735c549b4e8c2d55 Nov 26 16:59:11 crc kubenswrapper[5010]: I1126 16:59:11.007336 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d0a3cda8-d08e-45d1-865c-208d947680ce","Type":"ContainerStarted","Data":"e109c570543ee92c7d62b0fb40661ec30e33c90bc660bf3c735c549b4e8c2d55"} Nov 26 16:59:14 crc kubenswrapper[5010]: I1126 16:59:14.030843 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d0a3cda8-d08e-45d1-865c-208d947680ce","Type":"ContainerStarted","Data":"efe3d255e19a61263284d468fe31ad92be30aa318ef10c050ad6053201e2593d"} Nov 26 16:59:14 crc kubenswrapper[5010]: I1126 16:59:14.052345 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-copy-data" podStartSLOduration=3.056183278 podStartE2EDuration="6.052322356s" podCreationTimestamp="2025-11-26 16:59:08 +0000 UTC" firstStartedPulling="2025-11-26 16:59:10.147599787 +0000 UTC m=+5570.938316935" lastFinishedPulling="2025-11-26 16:59:13.143738865 +0000 UTC m=+5573.934456013" observedRunningTime="2025-11-26 16:59:14.045058435 +0000 UTC m=+5574.835775583" watchObservedRunningTime="2025-11-26 16:59:14.052322356 +0000 UTC m=+5574.843039504" Nov 26 16:59:17 crc kubenswrapper[5010]: I1126 16:59:17.438647 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:17 crc kubenswrapper[5010]: I1126 16:59:17.510269 5010 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/dnsmasq-dns-54564445dc-clm2g"] Nov 26 16:59:17 crc kubenswrapper[5010]: I1126 16:59:17.510501 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54564445dc-clm2g" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" containerName="dnsmasq-dns" containerID="cri-o://ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780" gracePeriod=10 Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.075414 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.089999 5010 generic.go:334] "Generic (PLEG): container finished" podID="e54363d2-9825-4825-84de-ed7e85d4c162" containerID="ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780" exitCode=0 Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.090099 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-clm2g" event={"ID":"e54363d2-9825-4825-84de-ed7e85d4c162","Type":"ContainerDied","Data":"ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780"} Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.090149 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54564445dc-clm2g" event={"ID":"e54363d2-9825-4825-84de-ed7e85d4c162","Type":"ContainerDied","Data":"de14b1671747550617891e0e7905054a63e955054aa422c1ac35a24de2c64cee"} Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.090176 5010 scope.go:117] "RemoveContainer" containerID="ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.154603 5010 scope.go:117] "RemoveContainer" containerID="5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.155348 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-config\") pod \"e54363d2-9825-4825-84de-ed7e85d4c162\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.155683 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-dns-svc\") pod \"e54363d2-9825-4825-84de-ed7e85d4c162\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.155794 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d72pw\" (UniqueName: \"kubernetes.io/projected/e54363d2-9825-4825-84de-ed7e85d4c162-kube-api-access-d72pw\") pod \"e54363d2-9825-4825-84de-ed7e85d4c162\" (UID: \"e54363d2-9825-4825-84de-ed7e85d4c162\") " Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.167055 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e54363d2-9825-4825-84de-ed7e85d4c162-kube-api-access-d72pw" (OuterVolumeSpecName: "kube-api-access-d72pw") pod "e54363d2-9825-4825-84de-ed7e85d4c162" (UID: "e54363d2-9825-4825-84de-ed7e85d4c162"). InnerVolumeSpecName "kube-api-access-d72pw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.221345 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e54363d2-9825-4825-84de-ed7e85d4c162" (UID: "e54363d2-9825-4825-84de-ed7e85d4c162"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.237021 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-config" (OuterVolumeSpecName: "config") pod "e54363d2-9825-4825-84de-ed7e85d4c162" (UID: "e54363d2-9825-4825-84de-ed7e85d4c162"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.245208 5010 scope.go:117] "RemoveContainer" containerID="ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780" Nov 26 16:59:18 crc kubenswrapper[5010]: E1126 16:59:18.245670 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780\": container with ID starting with ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780 not found: ID does not exist" containerID="ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.245700 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780"} err="failed to get container status \"ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780\": rpc error: code = NotFound desc = could not find container \"ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780\": container with ID starting with ade70785b58d4a2c2aeb136e27dd6d329cb94389fbdfddddec5805358af45780 not found: ID does not exist" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.245764 5010 scope.go:117] "RemoveContainer" containerID="5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e" Nov 26 16:59:18 crc kubenswrapper[5010]: E1126 16:59:18.246126 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e\": container with ID starting with 5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e not found: ID does not exist" containerID="5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.246212 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e"} err="failed to get container status \"5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e\": rpc error: code = NotFound desc = could not find container \"5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e\": container with ID starting with 5da3cf40ae6676953f976e46091e0d9bb8f43654e27b3192180fd7e9e86fde9e not found: ID does not exist" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.257124 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.257151 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d72pw\" (UniqueName: \"kubernetes.io/projected/e54363d2-9825-4825-84de-ed7e85d4c162-kube-api-access-d72pw\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:18 crc kubenswrapper[5010]: I1126 16:59:18.257161 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e54363d2-9825-4825-84de-ed7e85d4c162-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.102025 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-54564445dc-clm2g" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.144560 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-clm2g"] Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.151932 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54564445dc-clm2g"] Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.544167 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 26 16:59:19 crc kubenswrapper[5010]: E1126 16:59:19.544542 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" containerName="init" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.544558 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" containerName="init" Nov 26 16:59:19 crc kubenswrapper[5010]: E1126 16:59:19.544575 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" containerName="dnsmasq-dns" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.544583 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" containerName="dnsmasq-dns" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.544805 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" containerName="dnsmasq-dns" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.545800 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.548164 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-2zq4x" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.548411 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.548533 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.548892 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.572092 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.578386 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.578455 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-config\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.578567 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.578680 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b8rb7\" (UniqueName: \"kubernetes.io/projected/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-kube-api-access-b8rb7\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.578750 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.578909 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.579053 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-scripts\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: 
I1126 16:59:19.680889 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.680959 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-config\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.681003 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.681079 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b8rb7\" (UniqueName: \"kubernetes.io/projected/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-kube-api-access-b8rb7\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.681105 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.681193 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.681255 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-scripts\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.681817 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.682276 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-scripts\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.682369 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-config\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.685343 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.685760 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.685831 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.699064 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b8rb7\" (UniqueName: \"kubernetes.io/projected/75cbf5ad-ffb6-4a24-abe6-1b495c404f08-kube-api-access-b8rb7\") pod \"ovn-northd-0\" (UID: \"75cbf5ad-ffb6-4a24-abe6-1b495c404f08\") " pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.870164 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 26 16:59:19 crc kubenswrapper[5010]: I1126 16:59:19.902396 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e54363d2-9825-4825-84de-ed7e85d4c162" path="/var/lib/kubelet/pods/e54363d2-9825-4825-84de-ed7e85d4c162/volumes" Nov 26 16:59:20 crc kubenswrapper[5010]: I1126 16:59:20.361124 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 26 16:59:20 crc kubenswrapper[5010]: W1126 16:59:20.364337 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75cbf5ad_ffb6_4a24_abe6_1b495c404f08.slice/crio-5a34b66cfac680355ad37cb9e1cb0ce04242861a10d08bac7c5543409122a85c WatchSource:0}: Error finding container 5a34b66cfac680355ad37cb9e1cb0ce04242861a10d08bac7c5543409122a85c: Status 404 returned error can't find the container with id 5a34b66cfac680355ad37cb9e1cb0ce04242861a10d08bac7c5543409122a85c Nov 26 16:59:21 crc kubenswrapper[5010]: I1126 16:59:21.125021 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"75cbf5ad-ffb6-4a24-abe6-1b495c404f08","Type":"ContainerStarted","Data":"142234f28c8da2a7ad2f268cd49afbf72585cd92dc19e58c40e5ab844a871ced"} Nov 26 16:59:21 crc kubenswrapper[5010]: I1126 16:59:21.125647 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 26 16:59:21 crc kubenswrapper[5010]: I1126 16:59:21.125670 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"75cbf5ad-ffb6-4a24-abe6-1b495c404f08","Type":"ContainerStarted","Data":"59ac2babf4e3b098c188de3459a7665f9715f1c504e96f40a7a0835e3bd8b555"} Nov 26 16:59:21 crc kubenswrapper[5010]: I1126 16:59:21.125691 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"75cbf5ad-ffb6-4a24-abe6-1b495c404f08","Type":"ContainerStarted","Data":"5a34b66cfac680355ad37cb9e1cb0ce04242861a10d08bac7c5543409122a85c"} Nov 26 16:59:21 
crc kubenswrapper[5010]: I1126 16:59:21.157932 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.157904749 podStartE2EDuration="2.157904749s" podCreationTimestamp="2025-11-26 16:59:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:21.14350591 +0000 UTC m=+5581.934223058" watchObservedRunningTime="2025-11-26 16:59:21.157904749 +0000 UTC m=+5581.948621937" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.180435 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-d61b-account-create-update-hprj5"] Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.182209 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.186658 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.188005 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-gwm66"] Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.190100 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.195690 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-gwm66"] Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.204550 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d61b-account-create-update-hprj5"] Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.274629 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mbrcz\" (UniqueName: \"kubernetes.io/projected/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-kube-api-access-mbrcz\") pod \"keystone-d61b-account-create-update-hprj5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.275018 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-operator-scripts\") pod \"keystone-d61b-account-create-update-hprj5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.275269 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-operator-scripts\") pod \"keystone-db-create-gwm66\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.275373 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf7p7\" (UniqueName: \"kubernetes.io/projected/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-kube-api-access-zf7p7\") pod \"keystone-db-create-gwm66\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.376263 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-mbrcz\" (UniqueName: \"kubernetes.io/projected/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-kube-api-access-mbrcz\") pod \"keystone-d61b-account-create-update-hprj5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.376299 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-operator-scripts\") pod \"keystone-d61b-account-create-update-hprj5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.376376 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-operator-scripts\") pod \"keystone-db-create-gwm66\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.376412 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf7p7\" (UniqueName: \"kubernetes.io/projected/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-kube-api-access-zf7p7\") pod \"keystone-db-create-gwm66\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.377397 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-operator-scripts\") pod \"keystone-d61b-account-create-update-hprj5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.377678 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-operator-scripts\") pod \"keystone-db-create-gwm66\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.400872 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mbrcz\" (UniqueName: \"kubernetes.io/projected/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-kube-api-access-mbrcz\") pod \"keystone-d61b-account-create-update-hprj5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.406472 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf7p7\" (UniqueName: \"kubernetes.io/projected/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-kube-api-access-zf7p7\") pod \"keystone-db-create-gwm66\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.520305 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:25 crc kubenswrapper[5010]: I1126 16:59:25.527356 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:26 crc kubenswrapper[5010]: I1126 16:59:26.008463 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-gwm66"] Nov 26 16:59:26 crc kubenswrapper[5010]: I1126 16:59:26.073233 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-d61b-account-create-update-hprj5"] Nov 26 16:59:26 crc kubenswrapper[5010]: W1126 16:59:26.079652 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8ee05ae_14ec_43eb_930d_69c06e67a4d5.slice/crio-ec63ca728445e612e979ab6299b3376f63d65b655e58950cee992b53a9ed16c7 WatchSource:0}: Error finding container ec63ca728445e612e979ab6299b3376f63d65b655e58950cee992b53a9ed16c7: Status 404 returned error can't find the container with id ec63ca728445e612e979ab6299b3376f63d65b655e58950cee992b53a9ed16c7 Nov 26 16:59:26 crc kubenswrapper[5010]: I1126 16:59:26.179658 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gwm66" event={"ID":"38d78a1f-0bd8-415c-a6b6-1158112ec0d9","Type":"ContainerStarted","Data":"4dc82a0bb6b47e6648ef25e0051f8bed4e90c5aac3696f942217269b1f1b4449"} Nov 26 16:59:26 crc kubenswrapper[5010]: I1126 16:59:26.180581 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d61b-account-create-update-hprj5" event={"ID":"f8ee05ae-14ec-43eb-930d-69c06e67a4d5","Type":"ContainerStarted","Data":"ec63ca728445e612e979ab6299b3376f63d65b655e58950cee992b53a9ed16c7"} Nov 26 16:59:27 crc kubenswrapper[5010]: I1126 16:59:27.191081 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gwm66" event={"ID":"38d78a1f-0bd8-415c-a6b6-1158112ec0d9","Type":"ContainerStarted","Data":"0446aebb1fa399da149bd9602b95ef5ff3303b9b9216da9fd26093ae055b58ea"} Nov 26 16:59:27 crc kubenswrapper[5010]: I1126 16:59:27.192731 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d61b-account-create-update-hprj5" event={"ID":"f8ee05ae-14ec-43eb-930d-69c06e67a4d5","Type":"ContainerStarted","Data":"c936a1ea7e79564b5a3b3e925e771dc887132c9bc0eb90f35c5462c5c1e68aea"} Nov 26 16:59:27 crc kubenswrapper[5010]: I1126 16:59:27.221565 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-gwm66" podStartSLOduration=2.221536492 podStartE2EDuration="2.221536492s" podCreationTimestamp="2025-11-26 16:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:27.211062831 +0000 UTC m=+5588.001779999" watchObservedRunningTime="2025-11-26 16:59:27.221536492 +0000 UTC m=+5588.012253670" Nov 26 16:59:27 crc kubenswrapper[5010]: I1126 16:59:27.231529 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-d61b-account-create-update-hprj5" podStartSLOduration=2.23150573 podStartE2EDuration="2.23150573s" podCreationTimestamp="2025-11-26 16:59:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:27.227725016 +0000 UTC m=+5588.018442184" watchObservedRunningTime="2025-11-26 16:59:27.23150573 +0000 UTC m=+5588.022222918" Nov 26 16:59:28 crc kubenswrapper[5010]: I1126 16:59:28.200886 5010 generic.go:334] "Generic (PLEG): container finished" podID="38d78a1f-0bd8-415c-a6b6-1158112ec0d9" 
containerID="0446aebb1fa399da149bd9602b95ef5ff3303b9b9216da9fd26093ae055b58ea" exitCode=0 Nov 26 16:59:28 crc kubenswrapper[5010]: I1126 16:59:28.200968 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gwm66" event={"ID":"38d78a1f-0bd8-415c-a6b6-1158112ec0d9","Type":"ContainerDied","Data":"0446aebb1fa399da149bd9602b95ef5ff3303b9b9216da9fd26093ae055b58ea"} Nov 26 16:59:28 crc kubenswrapper[5010]: I1126 16:59:28.203059 5010 generic.go:334] "Generic (PLEG): container finished" podID="f8ee05ae-14ec-43eb-930d-69c06e67a4d5" containerID="c936a1ea7e79564b5a3b3e925e771dc887132c9bc0eb90f35c5462c5c1e68aea" exitCode=0 Nov 26 16:59:28 crc kubenswrapper[5010]: I1126 16:59:28.203111 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d61b-account-create-update-hprj5" event={"ID":"f8ee05ae-14ec-43eb-930d-69c06e67a4d5","Type":"ContainerDied","Data":"c936a1ea7e79564b5a3b3e925e771dc887132c9bc0eb90f35c5462c5c1e68aea"} Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.621336 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.628344 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.757808 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mbrcz\" (UniqueName: \"kubernetes.io/projected/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-kube-api-access-mbrcz\") pod \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.757921 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-operator-scripts\") pod \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\" (UID: \"f8ee05ae-14ec-43eb-930d-69c06e67a4d5\") " Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.757954 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-operator-scripts\") pod \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.758085 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zf7p7\" (UniqueName: \"kubernetes.io/projected/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-kube-api-access-zf7p7\") pod \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\" (UID: \"38d78a1f-0bd8-415c-a6b6-1158112ec0d9\") " Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.759187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "38d78a1f-0bd8-415c-a6b6-1158112ec0d9" (UID: "38d78a1f-0bd8-415c-a6b6-1158112ec0d9"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.759187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f8ee05ae-14ec-43eb-930d-69c06e67a4d5" (UID: "f8ee05ae-14ec-43eb-930d-69c06e67a4d5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.763486 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-kube-api-access-zf7p7" (OuterVolumeSpecName: "kube-api-access-zf7p7") pod "38d78a1f-0bd8-415c-a6b6-1158112ec0d9" (UID: "38d78a1f-0bd8-415c-a6b6-1158112ec0d9"). InnerVolumeSpecName "kube-api-access-zf7p7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.763837 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-kube-api-access-mbrcz" (OuterVolumeSpecName: "kube-api-access-mbrcz") pod "f8ee05ae-14ec-43eb-930d-69c06e67a4d5" (UID: "f8ee05ae-14ec-43eb-930d-69c06e67a4d5"). InnerVolumeSpecName "kube-api-access-mbrcz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.860244 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zf7p7\" (UniqueName: \"kubernetes.io/projected/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-kube-api-access-zf7p7\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.860601 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mbrcz\" (UniqueName: \"kubernetes.io/projected/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-kube-api-access-mbrcz\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.860862 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38d78a1f-0bd8-415c-a6b6-1158112ec0d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:29 crc kubenswrapper[5010]: I1126 16:59:29.861002 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f8ee05ae-14ec-43eb-930d-69c06e67a4d5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:30 crc kubenswrapper[5010]: I1126 16:59:30.224459 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-gwm66" event={"ID":"38d78a1f-0bd8-415c-a6b6-1158112ec0d9","Type":"ContainerDied","Data":"4dc82a0bb6b47e6648ef25e0051f8bed4e90c5aac3696f942217269b1f1b4449"} Nov 26 16:59:30 crc kubenswrapper[5010]: I1126 16:59:30.224503 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4dc82a0bb6b47e6648ef25e0051f8bed4e90c5aac3696f942217269b1f1b4449" Nov 26 16:59:30 crc kubenswrapper[5010]: I1126 16:59:30.224475 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-gwm66" Nov 26 16:59:30 crc kubenswrapper[5010]: I1126 16:59:30.226926 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-d61b-account-create-update-hprj5" event={"ID":"f8ee05ae-14ec-43eb-930d-69c06e67a4d5","Type":"ContainerDied","Data":"ec63ca728445e612e979ab6299b3376f63d65b655e58950cee992b53a9ed16c7"} Nov 26 16:59:30 crc kubenswrapper[5010]: I1126 16:59:30.226953 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec63ca728445e612e979ab6299b3376f63d65b655e58950cee992b53a9ed16c7" Nov 26 16:59:30 crc kubenswrapper[5010]: I1126 16:59:30.227000 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-d61b-account-create-update-hprj5" Nov 26 16:59:34 crc kubenswrapper[5010]: I1126 16:59:34.965235 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.753794 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-bffnt"] Nov 26 16:59:35 crc kubenswrapper[5010]: E1126 16:59:35.754178 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8ee05ae-14ec-43eb-930d-69c06e67a4d5" containerName="mariadb-account-create-update" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.754201 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8ee05ae-14ec-43eb-930d-69c06e67a4d5" containerName="mariadb-account-create-update" Nov 26 16:59:35 crc kubenswrapper[5010]: E1126 16:59:35.754249 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38d78a1f-0bd8-415c-a6b6-1158112ec0d9" containerName="mariadb-database-create" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.754257 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="38d78a1f-0bd8-415c-a6b6-1158112ec0d9" containerName="mariadb-database-create" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.754449 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="38d78a1f-0bd8-415c-a6b6-1158112ec0d9" containerName="mariadb-database-create" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.754483 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8ee05ae-14ec-43eb-930d-69c06e67a4d5" containerName="mariadb-account-create-update" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.755213 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.757809 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.757944 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.758010 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-442gj" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.758218 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.768585 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-bffnt"] Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.867533 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-combined-ca-bundle\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.867590 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-config-data\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.867671 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lwbpv\" (UniqueName: \"kubernetes.io/projected/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-kube-api-access-lwbpv\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.969304 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-combined-ca-bundle\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.969377 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-config-data\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.969616 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lwbpv\" (UniqueName: \"kubernetes.io/projected/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-kube-api-access-lwbpv\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.976305 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-combined-ca-bundle\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " 
pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.991213 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-config-data\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:35 crc kubenswrapper[5010]: I1126 16:59:35.994253 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lwbpv\" (UniqueName: \"kubernetes.io/projected/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-kube-api-access-lwbpv\") pod \"keystone-db-sync-bffnt\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:36 crc kubenswrapper[5010]: I1126 16:59:36.084767 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:36 crc kubenswrapper[5010]: I1126 16:59:36.498044 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-bffnt"] Nov 26 16:59:37 crc kubenswrapper[5010]: I1126 16:59:37.293924 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bffnt" event={"ID":"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff","Type":"ContainerStarted","Data":"aab7743c11c6ef865798bf865aefa47f7eeffc1b0b85d86acf5f153684df6ad5"} Nov 26 16:59:37 crc kubenswrapper[5010]: I1126 16:59:37.294268 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bffnt" event={"ID":"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff","Type":"ContainerStarted","Data":"e5fa8db603a014934719b7963b0d95df4fe8000a2906e2f676d3c4d7877b1418"} Nov 26 16:59:37 crc kubenswrapper[5010]: I1126 16:59:37.331364 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-bffnt" podStartSLOduration=2.331339482 podStartE2EDuration="2.331339482s" podCreationTimestamp="2025-11-26 16:59:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:37.320256686 +0000 UTC m=+5598.110973864" watchObservedRunningTime="2025-11-26 16:59:37.331339482 +0000 UTC m=+5598.122056670" Nov 26 16:59:38 crc kubenswrapper[5010]: I1126 16:59:38.307196 5010 generic.go:334] "Generic (PLEG): container finished" podID="3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" containerID="aab7743c11c6ef865798bf865aefa47f7eeffc1b0b85d86acf5f153684df6ad5" exitCode=0 Nov 26 16:59:38 crc kubenswrapper[5010]: I1126 16:59:38.307297 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bffnt" event={"ID":"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff","Type":"ContainerDied","Data":"aab7743c11c6ef865798bf865aefa47f7eeffc1b0b85d86acf5f153684df6ad5"} Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.707354 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.839523 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-config-data\") pod \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.839678 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lwbpv\" (UniqueName: \"kubernetes.io/projected/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-kube-api-access-lwbpv\") pod \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.839947 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-combined-ca-bundle\") pod \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\" (UID: \"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff\") " Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.845667 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-kube-api-access-lwbpv" (OuterVolumeSpecName: "kube-api-access-lwbpv") pod "3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" (UID: "3cf03ab2-653c-44b2-b9f8-6ad3de0800ff"). InnerVolumeSpecName "kube-api-access-lwbpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.868439 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" (UID: "3cf03ab2-653c-44b2-b9f8-6ad3de0800ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.882878 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-config-data" (OuterVolumeSpecName: "config-data") pod "3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" (UID: "3cf03ab2-653c-44b2-b9f8-6ad3de0800ff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.942162 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.942203 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:39 crc kubenswrapper[5010]: I1126 16:59:39.942217 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lwbpv\" (UniqueName: \"kubernetes.io/projected/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff-kube-api-access-lwbpv\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.329066 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-bffnt" event={"ID":"3cf03ab2-653c-44b2-b9f8-6ad3de0800ff","Type":"ContainerDied","Data":"e5fa8db603a014934719b7963b0d95df4fe8000a2906e2f676d3c4d7877b1418"} Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.329129 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5fa8db603a014934719b7963b0d95df4fe8000a2906e2f676d3c4d7877b1418" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.329135 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-bffnt" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.620008 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-r2l5j"] Nov 26 16:59:40 crc kubenswrapper[5010]: E1126 16:59:40.620319 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" containerName="keystone-db-sync" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.620335 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" containerName="keystone-db-sync" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.620499 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" containerName="keystone-db-sync" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.621098 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.623820 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.625696 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-442gj" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.625942 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.628627 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.628877 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.631361 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d9b4bcd5-swhgr"] Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.632682 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.652126 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-r2l5j"] Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.661237 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d9b4bcd5-swhgr"] Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755557 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-scripts\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755631 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-sb\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755701 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-combined-ca-bundle\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755793 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kktgr\" (UniqueName: \"kubernetes.io/projected/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-kube-api-access-kktgr\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755825 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-config-data\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " 
pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755864 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-config\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755888 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-fernet-keys\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.755920 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrwvl\" (UniqueName: \"kubernetes.io/projected/c09fed7e-f21a-4c60-9714-f80d9c7501d0-kube-api-access-xrwvl\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.756060 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-dns-svc\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.756183 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-nb\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.756246 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-credential-keys\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858150 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrwvl\" (UniqueName: \"kubernetes.io/projected/c09fed7e-f21a-4c60-9714-f80d9c7501d0-kube-api-access-xrwvl\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858199 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-dns-svc\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858235 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-nb\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " 
pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858262 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-credential-keys\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858291 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-scripts\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858324 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-sb\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.858350 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-combined-ca-bundle\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859222 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kktgr\" (UniqueName: \"kubernetes.io/projected/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-kube-api-access-kktgr\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859244 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-config-data\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859272 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-config\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859267 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-dns-svc\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859267 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-sb\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859285 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-fernet-keys\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.859611 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-nb\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.860086 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-config\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.862316 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-credential-keys\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.862566 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-scripts\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.862623 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-combined-ca-bundle\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.863424 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-config-data\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.863911 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-fernet-keys\") pod \"keystone-bootstrap-r2l5j\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.877984 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kktgr\" (UniqueName: \"kubernetes.io/projected/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-kube-api-access-kktgr\") pod \"dnsmasq-dns-57d9b4bcd5-swhgr\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.879276 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrwvl\" (UniqueName: \"kubernetes.io/projected/c09fed7e-f21a-4c60-9714-f80d9c7501d0-kube-api-access-xrwvl\") pod \"keystone-bootstrap-r2l5j\" (UID: 
\"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.948628 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:40 crc kubenswrapper[5010]: I1126 16:59:40.956331 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:41 crc kubenswrapper[5010]: I1126 16:59:41.416928 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d9b4bcd5-swhgr"] Nov 26 16:59:41 crc kubenswrapper[5010]: I1126 16:59:41.423147 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 16:59:41 crc kubenswrapper[5010]: I1126 16:59:41.423208 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 16:59:41 crc kubenswrapper[5010]: W1126 16:59:41.432022 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8dbf07e2_4d29_427e_acfb_6e607e5d6f9d.slice/crio-2794a5a467ce31b8c0fe5da0b554ecab1cce51ee6dd7da8b9071738840751f78 WatchSource:0}: Error finding container 2794a5a467ce31b8c0fe5da0b554ecab1cce51ee6dd7da8b9071738840751f78: Status 404 returned error can't find the container with id 2794a5a467ce31b8c0fe5da0b554ecab1cce51ee6dd7da8b9071738840751f78 Nov 26 16:59:41 crc kubenswrapper[5010]: I1126 16:59:41.492186 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-r2l5j"] Nov 26 16:59:41 crc kubenswrapper[5010]: W1126 16:59:41.498292 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc09fed7e_f21a_4c60_9714_f80d9c7501d0.slice/crio-9fd1a913fed29ac95a29bfb9666c4b03f084cc6e6dfd32e14714402c6d4848ae WatchSource:0}: Error finding container 9fd1a913fed29ac95a29bfb9666c4b03f084cc6e6dfd32e14714402c6d4848ae: Status 404 returned error can't find the container with id 9fd1a913fed29ac95a29bfb9666c4b03f084cc6e6dfd32e14714402c6d4848ae Nov 26 16:59:42 crc kubenswrapper[5010]: I1126 16:59:42.345967 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r2l5j" event={"ID":"c09fed7e-f21a-4c60-9714-f80d9c7501d0","Type":"ContainerStarted","Data":"3d5b121e410410910ea015003d98ef0fc4fdd64704ac34b8aa43f9194347fb52"} Nov 26 16:59:42 crc kubenswrapper[5010]: I1126 16:59:42.346325 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r2l5j" event={"ID":"c09fed7e-f21a-4c60-9714-f80d9c7501d0","Type":"ContainerStarted","Data":"9fd1a913fed29ac95a29bfb9666c4b03f084cc6e6dfd32e14714402c6d4848ae"} Nov 26 16:59:42 crc kubenswrapper[5010]: I1126 16:59:42.347777 5010 generic.go:334] "Generic (PLEG): container finished" podID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerID="7c4f8413a2af61b16b13663a350314a5104f11968f9b9a9ec80d3f58dee0be28" exitCode=0 Nov 26 16:59:42 crc kubenswrapper[5010]: I1126 16:59:42.347846 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" event={"ID":"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d","Type":"ContainerDied","Data":"7c4f8413a2af61b16b13663a350314a5104f11968f9b9a9ec80d3f58dee0be28"} Nov 26 16:59:42 crc kubenswrapper[5010]: I1126 16:59:42.347873 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" event={"ID":"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d","Type":"ContainerStarted","Data":"2794a5a467ce31b8c0fe5da0b554ecab1cce51ee6dd7da8b9071738840751f78"} Nov 26 16:59:42 crc kubenswrapper[5010]: I1126 16:59:42.377943 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-r2l5j" podStartSLOduration=2.377923436 podStartE2EDuration="2.377923436s" podCreationTimestamp="2025-11-26 16:59:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:42.371126907 +0000 UTC m=+5603.161844055" watchObservedRunningTime="2025-11-26 16:59:42.377923436 +0000 UTC m=+5603.168640584" Nov 26 16:59:43 crc kubenswrapper[5010]: I1126 16:59:43.360781 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" event={"ID":"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d","Type":"ContainerStarted","Data":"c35b879200ae7deacde750d0c4d9662644a9f5cead1e81dcfb2dbcf9f8fbf84e"} Nov 26 16:59:43 crc kubenswrapper[5010]: I1126 16:59:43.383005 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" podStartSLOduration=3.382986198 podStartE2EDuration="3.382986198s" podCreationTimestamp="2025-11-26 16:59:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:43.380634869 +0000 UTC m=+5604.171352027" watchObservedRunningTime="2025-11-26 16:59:43.382986198 +0000 UTC m=+5604.173703346" Nov 26 16:59:44 crc kubenswrapper[5010]: I1126 16:59:44.369145 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:45 crc kubenswrapper[5010]: I1126 16:59:45.393451 5010 generic.go:334] "Generic (PLEG): container finished" podID="c09fed7e-f21a-4c60-9714-f80d9c7501d0" containerID="3d5b121e410410910ea015003d98ef0fc4fdd64704ac34b8aa43f9194347fb52" exitCode=0 Nov 26 16:59:45 crc kubenswrapper[5010]: I1126 16:59:45.393542 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r2l5j" event={"ID":"c09fed7e-f21a-4c60-9714-f80d9c7501d0","Type":"ContainerDied","Data":"3d5b121e410410910ea015003d98ef0fc4fdd64704ac34b8aa43f9194347fb52"} Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.794081 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.867582 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-scripts\") pod \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.867815 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-credential-keys\") pod \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.868496 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrwvl\" (UniqueName: \"kubernetes.io/projected/c09fed7e-f21a-4c60-9714-f80d9c7501d0-kube-api-access-xrwvl\") pod \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.868661 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-combined-ca-bundle\") pod \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.868758 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-fernet-keys\") pod \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.868819 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-config-data\") pod \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\" (UID: \"c09fed7e-f21a-4c60-9714-f80d9c7501d0\") " Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.874886 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c09fed7e-f21a-4c60-9714-f80d9c7501d0" (UID: "c09fed7e-f21a-4c60-9714-f80d9c7501d0"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.875003 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c09fed7e-f21a-4c60-9714-f80d9c7501d0-kube-api-access-xrwvl" (OuterVolumeSpecName: "kube-api-access-xrwvl") pod "c09fed7e-f21a-4c60-9714-f80d9c7501d0" (UID: "c09fed7e-f21a-4c60-9714-f80d9c7501d0"). InnerVolumeSpecName "kube-api-access-xrwvl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.875241 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c09fed7e-f21a-4c60-9714-f80d9c7501d0" (UID: "c09fed7e-f21a-4c60-9714-f80d9c7501d0"). InnerVolumeSpecName "credential-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.876734 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-scripts" (OuterVolumeSpecName: "scripts") pod "c09fed7e-f21a-4c60-9714-f80d9c7501d0" (UID: "c09fed7e-f21a-4c60-9714-f80d9c7501d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.897500 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c09fed7e-f21a-4c60-9714-f80d9c7501d0" (UID: "c09fed7e-f21a-4c60-9714-f80d9c7501d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.898251 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-config-data" (OuterVolumeSpecName: "config-data") pod "c09fed7e-f21a-4c60-9714-f80d9c7501d0" (UID: "c09fed7e-f21a-4c60-9714-f80d9c7501d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.971479 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.971843 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.971861 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.971870 5010 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.971882 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrwvl\" (UniqueName: \"kubernetes.io/projected/c09fed7e-f21a-4c60-9714-f80d9c7501d0-kube-api-access-xrwvl\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:46 crc kubenswrapper[5010]: I1126 16:59:46.971891 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c09fed7e-f21a-4c60-9714-f80d9c7501d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.429481 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-r2l5j" event={"ID":"c09fed7e-f21a-4c60-9714-f80d9c7501d0","Type":"ContainerDied","Data":"9fd1a913fed29ac95a29bfb9666c4b03f084cc6e6dfd32e14714402c6d4848ae"} Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.429527 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9fd1a913fed29ac95a29bfb9666c4b03f084cc6e6dfd32e14714402c6d4848ae" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.429540 5010 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-r2l5j" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.595151 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-r2l5j"] Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.604581 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-r2l5j"] Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.701330 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-sjln8"] Nov 26 16:59:47 crc kubenswrapper[5010]: E1126 16:59:47.701923 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c09fed7e-f21a-4c60-9714-f80d9c7501d0" containerName="keystone-bootstrap" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.701957 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c09fed7e-f21a-4c60-9714-f80d9c7501d0" containerName="keystone-bootstrap" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.702268 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c09fed7e-f21a-4c60-9714-f80d9c7501d0" containerName="keystone-bootstrap" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.703234 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.705427 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.705475 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.705849 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.706089 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.709553 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-sjln8"] Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.717771 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-442gj" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.786059 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-scripts\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.786117 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-fernet-keys\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.786249 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-combined-ca-bundle\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: 
I1126 16:59:47.786303 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-credential-keys\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.786384 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfmst\" (UniqueName: \"kubernetes.io/projected/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-kube-api-access-xfmst\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.786435 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-config-data\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.887698 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfmst\" (UniqueName: \"kubernetes.io/projected/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-kube-api-access-xfmst\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.888880 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-config-data\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.889036 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-scripts\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.889195 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-fernet-keys\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.889464 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-combined-ca-bundle\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.889915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-credential-keys\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.893776 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-scripts\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.894213 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-config-data\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.896031 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-combined-ca-bundle\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.901817 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-fernet-keys\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.903117 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-credential-keys\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.907495 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfmst\" (UniqueName: \"kubernetes.io/projected/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-kube-api-access-xfmst\") pod \"keystone-bootstrap-sjln8\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:47 crc kubenswrapper[5010]: I1126 16:59:47.913343 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c09fed7e-f21a-4c60-9714-f80d9c7501d0" path="/var/lib/kubelet/pods/c09fed7e-f21a-4c60-9714-f80d9c7501d0/volumes" Nov 26 16:59:48 crc kubenswrapper[5010]: I1126 16:59:48.031872 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:48 crc kubenswrapper[5010]: I1126 16:59:48.519585 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-sjln8"] Nov 26 16:59:49 crc kubenswrapper[5010]: I1126 16:59:49.450347 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sjln8" event={"ID":"5ae3a95e-b0ee-4483-b4e2-86f2824386d2","Type":"ContainerStarted","Data":"9541f30bfd2a5bc53a5577924e54d858d24f1d77e4911ec177d4d853416cd676"} Nov 26 16:59:49 crc kubenswrapper[5010]: I1126 16:59:49.450693 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sjln8" event={"ID":"5ae3a95e-b0ee-4483-b4e2-86f2824386d2","Type":"ContainerStarted","Data":"3991c44c0ad902e480de54a7b587a1e7ffc08689293031b485068c2c3e4a31c6"} Nov 26 16:59:49 crc kubenswrapper[5010]: I1126 16:59:49.472130 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-sjln8" podStartSLOduration=2.472107015 podStartE2EDuration="2.472107015s" podCreationTimestamp="2025-11-26 16:59:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:49.470547817 +0000 UTC m=+5610.261265035" watchObservedRunningTime="2025-11-26 16:59:49.472107015 +0000 UTC m=+5610.262824193" Nov 26 16:59:50 crc kubenswrapper[5010]: I1126 16:59:50.957920 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.023266 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cc6b56df5-xvrlq"] Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.023544 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" podUID="40287857-f766-4747-a89a-598b28347738" containerName="dnsmasq-dns" containerID="cri-o://6974d7e0aa3191782462b813b3df96d59ca21913ae72c1f586e286dcf61c1e32" gracePeriod=10 Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.467457 5010 generic.go:334] "Generic (PLEG): container finished" podID="40287857-f766-4747-a89a-598b28347738" containerID="6974d7e0aa3191782462b813b3df96d59ca21913ae72c1f586e286dcf61c1e32" exitCode=0 Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.467546 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" event={"ID":"40287857-f766-4747-a89a-598b28347738","Type":"ContainerDied","Data":"6974d7e0aa3191782462b813b3df96d59ca21913ae72c1f586e286dcf61c1e32"} Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.467767 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" event={"ID":"40287857-f766-4747-a89a-598b28347738","Type":"ContainerDied","Data":"b34b883f044b24380ecdcc316a6425ee58d75e6bd1d87736e76aa597ff549150"} Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.467783 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b34b883f044b24380ecdcc316a6425ee58d75e6bd1d87736e76aa597ff549150" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.537776 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.664356 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-sb\") pod \"40287857-f766-4747-a89a-598b28347738\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.664405 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-config\") pod \"40287857-f766-4747-a89a-598b28347738\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.664473 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-nb\") pod \"40287857-f766-4747-a89a-598b28347738\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.664512 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs9xg\" (UniqueName: \"kubernetes.io/projected/40287857-f766-4747-a89a-598b28347738-kube-api-access-rs9xg\") pod \"40287857-f766-4747-a89a-598b28347738\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.664563 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-dns-svc\") pod \"40287857-f766-4747-a89a-598b28347738\" (UID: \"40287857-f766-4747-a89a-598b28347738\") " Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.670131 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40287857-f766-4747-a89a-598b28347738-kube-api-access-rs9xg" (OuterVolumeSpecName: "kube-api-access-rs9xg") pod "40287857-f766-4747-a89a-598b28347738" (UID: "40287857-f766-4747-a89a-598b28347738"). InnerVolumeSpecName "kube-api-access-rs9xg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.705363 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "40287857-f766-4747-a89a-598b28347738" (UID: "40287857-f766-4747-a89a-598b28347738"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.708296 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "40287857-f766-4747-a89a-598b28347738" (UID: "40287857-f766-4747-a89a-598b28347738"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.710408 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-config" (OuterVolumeSpecName: "config") pod "40287857-f766-4747-a89a-598b28347738" (UID: "40287857-f766-4747-a89a-598b28347738"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.719742 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "40287857-f766-4747-a89a-598b28347738" (UID: "40287857-f766-4747-a89a-598b28347738"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.767984 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.768016 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-config\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.768027 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.768037 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rs9xg\" (UniqueName: \"kubernetes.io/projected/40287857-f766-4747-a89a-598b28347738-kube-api-access-rs9xg\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:51 crc kubenswrapper[5010]: I1126 16:59:51.768068 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40287857-f766-4747-a89a-598b28347738-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:52 crc kubenswrapper[5010]: I1126 16:59:52.476581 5010 generic.go:334] "Generic (PLEG): container finished" podID="5ae3a95e-b0ee-4483-b4e2-86f2824386d2" containerID="9541f30bfd2a5bc53a5577924e54d858d24f1d77e4911ec177d4d853416cd676" exitCode=0 Nov 26 16:59:52 crc kubenswrapper[5010]: I1126 16:59:52.476661 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sjln8" event={"ID":"5ae3a95e-b0ee-4483-b4e2-86f2824386d2","Type":"ContainerDied","Data":"9541f30bfd2a5bc53a5577924e54d858d24f1d77e4911ec177d4d853416cd676"} Nov 26 16:59:52 crc kubenswrapper[5010]: I1126 16:59:52.476982 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cc6b56df5-xvrlq" Nov 26 16:59:52 crc kubenswrapper[5010]: I1126 16:59:52.518027 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cc6b56df5-xvrlq"] Nov 26 16:59:52 crc kubenswrapper[5010]: I1126 16:59:52.524081 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cc6b56df5-xvrlq"] Nov 26 16:59:53 crc kubenswrapper[5010]: I1126 16:59:53.857634 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:53 crc kubenswrapper[5010]: I1126 16:59:53.904537 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40287857-f766-4747-a89a-598b28347738" path="/var/lib/kubelet/pods/40287857-f766-4747-a89a-598b28347738/volumes" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.028102 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-credential-keys\") pod \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.028168 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-fernet-keys\") pod \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.028193 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-combined-ca-bundle\") pod \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.028222 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-config-data\") pod \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.028295 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-scripts\") pod \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.028313 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xfmst\" (UniqueName: \"kubernetes.io/projected/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-kube-api-access-xfmst\") pod \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\" (UID: \"5ae3a95e-b0ee-4483-b4e2-86f2824386d2\") " Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.033931 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-kube-api-access-xfmst" (OuterVolumeSpecName: "kube-api-access-xfmst") pod "5ae3a95e-b0ee-4483-b4e2-86f2824386d2" (UID: "5ae3a95e-b0ee-4483-b4e2-86f2824386d2"). InnerVolumeSpecName "kube-api-access-xfmst". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.034263 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-scripts" (OuterVolumeSpecName: "scripts") pod "5ae3a95e-b0ee-4483-b4e2-86f2824386d2" (UID: "5ae3a95e-b0ee-4483-b4e2-86f2824386d2"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.034504 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5ae3a95e-b0ee-4483-b4e2-86f2824386d2" (UID: "5ae3a95e-b0ee-4483-b4e2-86f2824386d2"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.035460 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5ae3a95e-b0ee-4483-b4e2-86f2824386d2" (UID: "5ae3a95e-b0ee-4483-b4e2-86f2824386d2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.052143 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-config-data" (OuterVolumeSpecName: "config-data") pod "5ae3a95e-b0ee-4483-b4e2-86f2824386d2" (UID: "5ae3a95e-b0ee-4483-b4e2-86f2824386d2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.064454 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5ae3a95e-b0ee-4483-b4e2-86f2824386d2" (UID: "5ae3a95e-b0ee-4483-b4e2-86f2824386d2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.130621 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.130655 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xfmst\" (UniqueName: \"kubernetes.io/projected/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-kube-api-access-xfmst\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.130669 5010 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.130678 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.130688 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.130697 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5ae3a95e-b0ee-4483-b4e2-86f2824386d2-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.511103 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-sjln8" 
event={"ID":"5ae3a95e-b0ee-4483-b4e2-86f2824386d2","Type":"ContainerDied","Data":"3991c44c0ad902e480de54a7b587a1e7ffc08689293031b485068c2c3e4a31c6"} Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.511149 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3991c44c0ad902e480de54a7b587a1e7ffc08689293031b485068c2c3e4a31c6" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.511193 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-sjln8" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.718974 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-855b4c8bc9-8m6lg"] Nov 26 16:59:54 crc kubenswrapper[5010]: E1126 16:59:54.719859 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40287857-f766-4747-a89a-598b28347738" containerName="dnsmasq-dns" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.719943 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="40287857-f766-4747-a89a-598b28347738" containerName="dnsmasq-dns" Nov 26 16:59:54 crc kubenswrapper[5010]: E1126 16:59:54.720009 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ae3a95e-b0ee-4483-b4e2-86f2824386d2" containerName="keystone-bootstrap" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.720107 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ae3a95e-b0ee-4483-b4e2-86f2824386d2" containerName="keystone-bootstrap" Nov 26 16:59:54 crc kubenswrapper[5010]: E1126 16:59:54.720177 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40287857-f766-4747-a89a-598b28347738" containerName="init" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.720236 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="40287857-f766-4747-a89a-598b28347738" containerName="init" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.720446 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="40287857-f766-4747-a89a-598b28347738" containerName="dnsmasq-dns" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.720542 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ae3a95e-b0ee-4483-b4e2-86f2824386d2" containerName="keystone-bootstrap" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.721231 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.725193 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.725765 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.726083 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.726268 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.726093 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-442gj" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.726088 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.764862 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-855b4c8bc9-8m6lg"] Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845734 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-credential-keys\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845797 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-internal-tls-certs\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845818 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-combined-ca-bundle\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845837 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-config-data\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845884 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-public-tls-certs\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845914 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jljg\" (UniqueName: \"kubernetes.io/projected/f9c273a9-97a8-4386-9f3c-ceca459cc42e-kube-api-access-6jljg\") pod 
\"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845929 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-scripts\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.845973 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-fernet-keys\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.947763 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-fernet-keys\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.947867 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-credential-keys\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.947901 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-internal-tls-certs\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.947918 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-combined-ca-bundle\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.947937 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-config-data\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.947983 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-public-tls-certs\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.948029 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jljg\" (UniqueName: \"kubernetes.io/projected/f9c273a9-97a8-4386-9f3c-ceca459cc42e-kube-api-access-6jljg\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " 
pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.948047 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-scripts\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.952915 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-fernet-keys\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.953079 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-public-tls-certs\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.953119 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-config-data\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.953634 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-combined-ca-bundle\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.953646 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-internal-tls-certs\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.953976 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-scripts\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.957362 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/f9c273a9-97a8-4386-9f3c-ceca459cc42e-credential-keys\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:54 crc kubenswrapper[5010]: I1126 16:59:54.966935 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jljg\" (UniqueName: \"kubernetes.io/projected/f9c273a9-97a8-4386-9f3c-ceca459cc42e-kube-api-access-6jljg\") pod \"keystone-855b4c8bc9-8m6lg\" (UID: \"f9c273a9-97a8-4386-9f3c-ceca459cc42e\") " pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:55 crc kubenswrapper[5010]: I1126 16:59:55.044441 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 16:59:55 crc kubenswrapper[5010]: I1126 16:59:55.521652 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-855b4c8bc9-8m6lg"] Nov 26 16:59:56 crc kubenswrapper[5010]: I1126 16:59:56.533805 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-855b4c8bc9-8m6lg" event={"ID":"f9c273a9-97a8-4386-9f3c-ceca459cc42e","Type":"ContainerStarted","Data":"590fb499ff30d29f8a9019eea665f488f0eeffb2adac5a49383cd5a25312bbe7"} Nov 26 16:59:56 crc kubenswrapper[5010]: I1126 16:59:56.534208 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-855b4c8bc9-8m6lg" event={"ID":"f9c273a9-97a8-4386-9f3c-ceca459cc42e","Type":"ContainerStarted","Data":"d567d5be570c7f918b46e161a110c72c50aebdff4b99b4cec7733a1c1d899127"} Nov 26 16:59:56 crc kubenswrapper[5010]: I1126 16:59:56.535029 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.133737 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-855b4c8bc9-8m6lg" podStartSLOduration=6.133685809 podStartE2EDuration="6.133685809s" podCreationTimestamp="2025-11-26 16:59:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 16:59:56.571140925 +0000 UTC m=+5617.361858153" watchObservedRunningTime="2025-11-26 17:00:00.133685809 +0000 UTC m=+5620.924402987" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.146910 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz"] Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.148233 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.150481 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.150994 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.162611 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz"] Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.236918 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ac8e8a-6f69-486e-b618-a79402db39a6-config-volume\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.237336 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ac8e8a-6f69-486e-b618-a79402db39a6-secret-volume\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.237426 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhghz\" (UniqueName: \"kubernetes.io/projected/d1ac8e8a-6f69-486e-b618-a79402db39a6-kube-api-access-xhghz\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.338496 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhghz\" (UniqueName: \"kubernetes.io/projected/d1ac8e8a-6f69-486e-b618-a79402db39a6-kube-api-access-xhghz\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.338607 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ac8e8a-6f69-486e-b618-a79402db39a6-config-volume\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.338669 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ac8e8a-6f69-486e-b618-a79402db39a6-secret-volume\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.340331 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ac8e8a-6f69-486e-b618-a79402db39a6-config-volume\") pod 
\"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.344957 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ac8e8a-6f69-486e-b618-a79402db39a6-secret-volume\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.354630 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhghz\" (UniqueName: \"kubernetes.io/projected/d1ac8e8a-6f69-486e-b618-a79402db39a6-kube-api-access-xhghz\") pod \"collect-profiles-29402940-jhsqz\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.500031 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:00 crc kubenswrapper[5010]: I1126 17:00:00.978031 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz"] Nov 26 17:00:00 crc kubenswrapper[5010]: W1126 17:00:00.983314 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1ac8e8a_6f69_486e_b618_a79402db39a6.slice/crio-a1abeb8a1a4319fc02d17e8ed614c4e90e6366a1686dddbc579ef98d9c5981b2 WatchSource:0}: Error finding container a1abeb8a1a4319fc02d17e8ed614c4e90e6366a1686dddbc579ef98d9c5981b2: Status 404 returned error can't find the container with id a1abeb8a1a4319fc02d17e8ed614c4e90e6366a1686dddbc579ef98d9c5981b2 Nov 26 17:00:01 crc kubenswrapper[5010]: I1126 17:00:01.586828 5010 generic.go:334] "Generic (PLEG): container finished" podID="d1ac8e8a-6f69-486e-b618-a79402db39a6" containerID="3d4f2a476d97e226c5a55dcc072405ad4d81ebc33abf34742124423da193463d" exitCode=0 Nov 26 17:00:01 crc kubenswrapper[5010]: I1126 17:00:01.586901 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" event={"ID":"d1ac8e8a-6f69-486e-b618-a79402db39a6","Type":"ContainerDied","Data":"3d4f2a476d97e226c5a55dcc072405ad4d81ebc33abf34742124423da193463d"} Nov 26 17:00:01 crc kubenswrapper[5010]: I1126 17:00:01.587135 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" event={"ID":"d1ac8e8a-6f69-486e-b618-a79402db39a6","Type":"ContainerStarted","Data":"a1abeb8a1a4319fc02d17e8ed614c4e90e6366a1686dddbc579ef98d9c5981b2"} Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.038476 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.187921 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhghz\" (UniqueName: \"kubernetes.io/projected/d1ac8e8a-6f69-486e-b618-a79402db39a6-kube-api-access-xhghz\") pod \"d1ac8e8a-6f69-486e-b618-a79402db39a6\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.187985 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ac8e8a-6f69-486e-b618-a79402db39a6-config-volume\") pod \"d1ac8e8a-6f69-486e-b618-a79402db39a6\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.188066 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ac8e8a-6f69-486e-b618-a79402db39a6-secret-volume\") pod \"d1ac8e8a-6f69-486e-b618-a79402db39a6\" (UID: \"d1ac8e8a-6f69-486e-b618-a79402db39a6\") " Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.188483 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ac8e8a-6f69-486e-b618-a79402db39a6-config-volume" (OuterVolumeSpecName: "config-volume") pod "d1ac8e8a-6f69-486e-b618-a79402db39a6" (UID: "d1ac8e8a-6f69-486e-b618-a79402db39a6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.192847 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1ac8e8a-6f69-486e-b618-a79402db39a6-kube-api-access-xhghz" (OuterVolumeSpecName: "kube-api-access-xhghz") pod "d1ac8e8a-6f69-486e-b618-a79402db39a6" (UID: "d1ac8e8a-6f69-486e-b618-a79402db39a6"). InnerVolumeSpecName "kube-api-access-xhghz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.192925 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ac8e8a-6f69-486e-b618-a79402db39a6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d1ac8e8a-6f69-486e-b618-a79402db39a6" (UID: "d1ac8e8a-6f69-486e-b618-a79402db39a6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.290789 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ac8e8a-6f69-486e-b618-a79402db39a6-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.290864 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhghz\" (UniqueName: \"kubernetes.io/projected/d1ac8e8a-6f69-486e-b618-a79402db39a6-kube-api-access-xhghz\") on node \"crc\" DevicePath \"\"" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.290891 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ac8e8a-6f69-486e-b618-a79402db39a6-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.608264 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" event={"ID":"d1ac8e8a-6f69-486e-b618-a79402db39a6","Type":"ContainerDied","Data":"a1abeb8a1a4319fc02d17e8ed614c4e90e6366a1686dddbc579ef98d9c5981b2"} Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.608314 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1abeb8a1a4319fc02d17e8ed614c4e90e6366a1686dddbc579ef98d9c5981b2" Nov 26 17:00:03 crc kubenswrapper[5010]: I1126 17:00:03.608376 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz" Nov 26 17:00:04 crc kubenswrapper[5010]: I1126 17:00:04.105557 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts"] Nov 26 17:00:04 crc kubenswrapper[5010]: I1126 17:00:04.113326 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402895-h4mts"] Nov 26 17:00:05 crc kubenswrapper[5010]: I1126 17:00:05.902800 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ea81325-8811-4b0c-90c5-500aaaaada9f" path="/var/lib/kubelet/pods/2ea81325-8811-4b0c-90c5-500aaaaada9f/volumes" Nov 26 17:00:11 crc kubenswrapper[5010]: I1126 17:00:11.422423 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:00:11 crc kubenswrapper[5010]: I1126 17:00:11.423164 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:00:26 crc kubenswrapper[5010]: I1126 17:00:26.582815 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-855b4c8bc9-8m6lg" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.180634 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 17:00:30 crc kubenswrapper[5010]: E1126 17:00:30.181615 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1ac8e8a-6f69-486e-b618-a79402db39a6" 
containerName="collect-profiles" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.181639 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1ac8e8a-6f69-486e-b618-a79402db39a6" containerName="collect-profiles" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.182056 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1ac8e8a-6f69-486e-b618-a79402db39a6" containerName="collect-profiles" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.183011 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.186497 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-v5vch" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.186628 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.186646 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.206276 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.302036 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.302347 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config-secret\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.302471 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.302605 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc5k4\" (UniqueName: \"kubernetes.io/projected/34a5290e-e2d7-407e-9c56-adedc14140a4-kube-api-access-bc5k4\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.403811 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.403893 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config-secret\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " 
pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.403919 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.403956 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc5k4\" (UniqueName: \"kubernetes.io/projected/34a5290e-e2d7-407e-9c56-adedc14140a4-kube-api-access-bc5k4\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.405359 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.410097 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.415981 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config-secret\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.422319 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc5k4\" (UniqueName: \"kubernetes.io/projected/34a5290e-e2d7-407e-9c56-adedc14140a4-kube-api-access-bc5k4\") pod \"openstackclient\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " pod="openstack/openstackclient" Nov 26 17:00:30 crc kubenswrapper[5010]: I1126 17:00:30.509318 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 17:00:31 crc kubenswrapper[5010]: I1126 17:00:31.023525 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 17:00:31 crc kubenswrapper[5010]: I1126 17:00:31.877570 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"34a5290e-e2d7-407e-9c56-adedc14140a4","Type":"ContainerStarted","Data":"4d373c9426554371258c7ba49ae4a0ebc68f14ff98789c572ff83d9adb33d716"} Nov 26 17:00:31 crc kubenswrapper[5010]: I1126 17:00:31.877985 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"34a5290e-e2d7-407e-9c56-adedc14140a4","Type":"ContainerStarted","Data":"2233b229e65fd72397bbd7e1fc4b0126ab6e6eca6bf22ea5a6cf8cbe462d2df7"} Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.423057 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.423673 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.423745 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.424506 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.424575 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" gracePeriod=600 Nov 26 17:00:41 crc kubenswrapper[5010]: E1126 17:00:41.571154 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.970890 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" exitCode=0 Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.970931 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902"} Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.970962 5010 scope.go:117] "RemoveContainer" containerID="9bc507e475d0d8669f1bff93162119416272f1c73d6cf135f83056541ab8c1ac" Nov 26 17:00:41 crc kubenswrapper[5010]: I1126 17:00:41.971556 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:00:41 crc kubenswrapper[5010]: E1126 17:00:41.971984 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:00:42 crc kubenswrapper[5010]: I1126 17:00:42.000926 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=12.000901264 podStartE2EDuration="12.000901264s" podCreationTimestamp="2025-11-26 17:00:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:00:31.896295281 +0000 UTC m=+5652.687012429" watchObservedRunningTime="2025-11-26 17:00:42.000901264 +0000 UTC m=+5662.791618412" Nov 26 17:00:53 crc kubenswrapper[5010]: I1126 17:00:53.892250 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:00:53 crc kubenswrapper[5010]: E1126 17:00:53.893417 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.171928 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29402941-mssk5"] Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.174590 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.208567 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402941-mssk5"] Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.304359 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-config-data\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.304551 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-fernet-keys\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.304647 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-combined-ca-bundle\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.304756 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jmm8\" (UniqueName: \"kubernetes.io/projected/338acf82-ff16-48d1-9cf3-ffbde62f81e6-kube-api-access-4jmm8\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.406915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-config-data\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.406990 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-fernet-keys\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.407027 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-combined-ca-bundle\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.407070 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jmm8\" (UniqueName: \"kubernetes.io/projected/338acf82-ff16-48d1-9cf3-ffbde62f81e6-kube-api-access-4jmm8\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.416546 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-combined-ca-bundle\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.417875 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-fernet-keys\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.420915 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-config-data\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.431022 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jmm8\" (UniqueName: \"kubernetes.io/projected/338acf82-ff16-48d1-9cf3-ffbde62f81e6-kube-api-access-4jmm8\") pod \"keystone-cron-29402941-mssk5\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:00 crc kubenswrapper[5010]: I1126 17:01:00.524197 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:01 crc kubenswrapper[5010]: I1126 17:01:01.015609 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29402941-mssk5"] Nov 26 17:01:01 crc kubenswrapper[5010]: I1126 17:01:01.148238 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402941-mssk5" event={"ID":"338acf82-ff16-48d1-9cf3-ffbde62f81e6","Type":"ContainerStarted","Data":"257d604661435ce4d5b41d6ebe0b8f1f61f2347d3deae2a1e73b71b437340559"} Nov 26 17:01:02 crc kubenswrapper[5010]: I1126 17:01:02.156984 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402941-mssk5" event={"ID":"338acf82-ff16-48d1-9cf3-ffbde62f81e6","Type":"ContainerStarted","Data":"25e20e6bd5ea2caee4a6abd8c50f85fe0e4c744f7350679a4f8bcd786e5dae16"} Nov 26 17:01:02 crc kubenswrapper[5010]: I1126 17:01:02.183741 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29402941-mssk5" podStartSLOduration=2.183690993 podStartE2EDuration="2.183690993s" podCreationTimestamp="2025-11-26 17:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:01:02.177848438 +0000 UTC m=+5682.968565696" watchObservedRunningTime="2025-11-26 17:01:02.183690993 +0000 UTC m=+5682.974408171" Nov 26 17:01:03 crc kubenswrapper[5010]: I1126 17:01:03.168795 5010 generic.go:334] "Generic (PLEG): container finished" podID="338acf82-ff16-48d1-9cf3-ffbde62f81e6" containerID="25e20e6bd5ea2caee4a6abd8c50f85fe0e4c744f7350679a4f8bcd786e5dae16" exitCode=0 Nov 26 17:01:03 crc kubenswrapper[5010]: I1126 17:01:03.168871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402941-mssk5" event={"ID":"338acf82-ff16-48d1-9cf3-ffbde62f81e6","Type":"ContainerDied","Data":"25e20e6bd5ea2caee4a6abd8c50f85fe0e4c744f7350679a4f8bcd786e5dae16"} Nov 26 17:01:04 crc kubenswrapper[5010]: 
I1126 17:01:04.584153 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.693119 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jmm8\" (UniqueName: \"kubernetes.io/projected/338acf82-ff16-48d1-9cf3-ffbde62f81e6-kube-api-access-4jmm8\") pod \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.693336 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-fernet-keys\") pod \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.693427 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-combined-ca-bundle\") pod \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.693531 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-config-data\") pod \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\" (UID: \"338acf82-ff16-48d1-9cf3-ffbde62f81e6\") " Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.711838 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "338acf82-ff16-48d1-9cf3-ffbde62f81e6" (UID: "338acf82-ff16-48d1-9cf3-ffbde62f81e6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.716080 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/338acf82-ff16-48d1-9cf3-ffbde62f81e6-kube-api-access-4jmm8" (OuterVolumeSpecName: "kube-api-access-4jmm8") pod "338acf82-ff16-48d1-9cf3-ffbde62f81e6" (UID: "338acf82-ff16-48d1-9cf3-ffbde62f81e6"). InnerVolumeSpecName "kube-api-access-4jmm8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.745088 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "338acf82-ff16-48d1-9cf3-ffbde62f81e6" (UID: "338acf82-ff16-48d1-9cf3-ffbde62f81e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.766188 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-config-data" (OuterVolumeSpecName: "config-data") pod "338acf82-ff16-48d1-9cf3-ffbde62f81e6" (UID: "338acf82-ff16-48d1-9cf3-ffbde62f81e6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.797358 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jmm8\" (UniqueName: \"kubernetes.io/projected/338acf82-ff16-48d1-9cf3-ffbde62f81e6-kube-api-access-4jmm8\") on node \"crc\" DevicePath \"\"" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.797388 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.797402 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:01:04 crc kubenswrapper[5010]: I1126 17:01:04.797412 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/338acf82-ff16-48d1-9cf3-ffbde62f81e6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.204213 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29402941-mssk5" event={"ID":"338acf82-ff16-48d1-9cf3-ffbde62f81e6","Type":"ContainerDied","Data":"257d604661435ce4d5b41d6ebe0b8f1f61f2347d3deae2a1e73b71b437340559"} Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.204264 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="257d604661435ce4d5b41d6ebe0b8f1f61f2347d3deae2a1e73b71b437340559" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.204325 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29402941-mssk5" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.498438 5010 scope.go:117] "RemoveContainer" containerID="ac806371774bbf315bd728cdf025c34558385d43cdb5a05b329c8f70458f0fc9" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.538214 5010 scope.go:117] "RemoveContainer" containerID="7a0a67ffd90f2c2fc7f0a041e47adfb1d288e8510fa33e8b7b4a0b327dfc7e6e" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.598328 5010 scope.go:117] "RemoveContainer" containerID="a864714f865246ab5113f5b1e3e5a26ee62fd9816deddcfcc5dd4bb4a579279b" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.651409 5010 scope.go:117] "RemoveContainer" containerID="0c2a102da55c1c95f648861c13afe932017037f7a4b66eba50efefa3ef192137" Nov 26 17:01:05 crc kubenswrapper[5010]: I1126 17:01:05.680692 5010 scope.go:117] "RemoveContainer" containerID="33951497e782d288e6726a745cda9e4d9e0b01f1d218bf1851f2bc1e686d026c" Nov 26 17:01:08 crc kubenswrapper[5010]: I1126 17:01:08.892029 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:01:08 crc kubenswrapper[5010]: E1126 17:01:08.892810 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:01:19 crc kubenswrapper[5010]: I1126 17:01:19.905360 5010 scope.go:117] "RemoveContainer" 
containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:01:19 crc kubenswrapper[5010]: E1126 17:01:19.906558 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:01:31 crc kubenswrapper[5010]: I1126 17:01:31.892230 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:01:31 crc kubenswrapper[5010]: E1126 17:01:31.893049 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:01:38 crc kubenswrapper[5010]: E1126 17:01:38.858797 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/rpm-ostreed.service\": RecentStats: unable to find data in memory cache]" Nov 26 17:01:45 crc kubenswrapper[5010]: I1126 17:01:45.892994 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:01:45 crc kubenswrapper[5010]: E1126 17:01:45.894373 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:01:58 crc kubenswrapper[5010]: I1126 17:01:58.892093 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:01:58 crc kubenswrapper[5010]: E1126 17:01:58.893209 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.140566 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-kvm86"] Nov 26 17:02:06 crc kubenswrapper[5010]: E1126 17:02:06.141373 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="338acf82-ff16-48d1-9cf3-ffbde62f81e6" containerName="keystone-cron" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.141385 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="338acf82-ff16-48d1-9cf3-ffbde62f81e6" containerName="keystone-cron" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.141537 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="338acf82-ff16-48d1-9cf3-ffbde62f81e6" containerName="keystone-cron" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.142095 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.147973 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-kvm86"] Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.226609 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-93db-account-create-update-5xvw8"] Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.228059 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.232559 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.234070 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-93db-account-create-update-5xvw8"] Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.307561 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb1a8606-74e2-4480-8c23-45610b2761f9-operator-scripts\") pod \"barbican-db-create-kvm86\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.307951 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9828\" (UniqueName: \"kubernetes.io/projected/fb1a8606-74e2-4480-8c23-45610b2761f9-kube-api-access-x9828\") pod \"barbican-db-create-kvm86\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.409580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n556\" (UniqueName: \"kubernetes.io/projected/94b20deb-30ad-476a-88e4-983c09558bd8-kube-api-access-9n556\") pod \"barbican-93db-account-create-update-5xvw8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.409928 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94b20deb-30ad-476a-88e4-983c09558bd8-operator-scripts\") pod \"barbican-93db-account-create-update-5xvw8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.410066 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb1a8606-74e2-4480-8c23-45610b2761f9-operator-scripts\") pod \"barbican-db-create-kvm86\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.410215 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9828\" (UniqueName: \"kubernetes.io/projected/fb1a8606-74e2-4480-8c23-45610b2761f9-kube-api-access-x9828\") pod \"barbican-db-create-kvm86\" (UID: 
\"fb1a8606-74e2-4480-8c23-45610b2761f9\") " pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.410986 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb1a8606-74e2-4480-8c23-45610b2761f9-operator-scripts\") pod \"barbican-db-create-kvm86\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.443529 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9828\" (UniqueName: \"kubernetes.io/projected/fb1a8606-74e2-4480-8c23-45610b2761f9-kube-api-access-x9828\") pod \"barbican-db-create-kvm86\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.461303 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.511343 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n556\" (UniqueName: \"kubernetes.io/projected/94b20deb-30ad-476a-88e4-983c09558bd8-kube-api-access-9n556\") pod \"barbican-93db-account-create-update-5xvw8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.511605 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94b20deb-30ad-476a-88e4-983c09558bd8-operator-scripts\") pod \"barbican-93db-account-create-update-5xvw8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.512346 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94b20deb-30ad-476a-88e4-983c09558bd8-operator-scripts\") pod \"barbican-93db-account-create-update-5xvw8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.535951 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n556\" (UniqueName: \"kubernetes.io/projected/94b20deb-30ad-476a-88e4-983c09558bd8-kube-api-access-9n556\") pod \"barbican-93db-account-create-update-5xvw8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.544920 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:06 crc kubenswrapper[5010]: I1126 17:02:06.992335 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-kvm86"] Nov 26 17:02:07 crc kubenswrapper[5010]: W1126 17:02:07.005601 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb1a8606_74e2_4480_8c23_45610b2761f9.slice/crio-369a7d41451ceb3fde529558961b4c17dd289b1f50af26902b23df03095d11b2 WatchSource:0}: Error finding container 369a7d41451ceb3fde529558961b4c17dd289b1f50af26902b23df03095d11b2: Status 404 returned error can't find the container with id 369a7d41451ceb3fde529558961b4c17dd289b1f50af26902b23df03095d11b2 Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.080071 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-93db-account-create-update-5xvw8"] Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.849354 5010 generic.go:334] "Generic (PLEG): container finished" podID="94b20deb-30ad-476a-88e4-983c09558bd8" containerID="8ac5149bc067ad8522373358e51efa7482eee091b169d6ce0826dacb4bdadadc" exitCode=0 Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.849442 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-93db-account-create-update-5xvw8" event={"ID":"94b20deb-30ad-476a-88e4-983c09558bd8","Type":"ContainerDied","Data":"8ac5149bc067ad8522373358e51efa7482eee091b169d6ce0826dacb4bdadadc"} Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.849482 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-93db-account-create-update-5xvw8" event={"ID":"94b20deb-30ad-476a-88e4-983c09558bd8","Type":"ContainerStarted","Data":"4fce07fafce4c16a5c00736d0d5ac190ad46982b21ba79cc18e6c4a61370c47b"} Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.855591 5010 generic.go:334] "Generic (PLEG): container finished" podID="fb1a8606-74e2-4480-8c23-45610b2761f9" containerID="858004dceebc4f3004ebfeca114387f89ecd904d9d305bdd635b1a180f4ee9a8" exitCode=0 Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.855671 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-kvm86" event={"ID":"fb1a8606-74e2-4480-8c23-45610b2761f9","Type":"ContainerDied","Data":"858004dceebc4f3004ebfeca114387f89ecd904d9d305bdd635b1a180f4ee9a8"} Nov 26 17:02:07 crc kubenswrapper[5010]: I1126 17:02:07.855809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-kvm86" event={"ID":"fb1a8606-74e2-4480-8c23-45610b2761f9","Type":"ContainerStarted","Data":"369a7d41451ceb3fde529558961b4c17dd289b1f50af26902b23df03095d11b2"} Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.280569 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.289634 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.468773 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x9828\" (UniqueName: \"kubernetes.io/projected/fb1a8606-74e2-4480-8c23-45610b2761f9-kube-api-access-x9828\") pod \"fb1a8606-74e2-4480-8c23-45610b2761f9\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.468864 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94b20deb-30ad-476a-88e4-983c09558bd8-operator-scripts\") pod \"94b20deb-30ad-476a-88e4-983c09558bd8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.468934 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n556\" (UniqueName: \"kubernetes.io/projected/94b20deb-30ad-476a-88e4-983c09558bd8-kube-api-access-9n556\") pod \"94b20deb-30ad-476a-88e4-983c09558bd8\" (UID: \"94b20deb-30ad-476a-88e4-983c09558bd8\") " Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.469046 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb1a8606-74e2-4480-8c23-45610b2761f9-operator-scripts\") pod \"fb1a8606-74e2-4480-8c23-45610b2761f9\" (UID: \"fb1a8606-74e2-4480-8c23-45610b2761f9\") " Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.469436 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/94b20deb-30ad-476a-88e4-983c09558bd8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "94b20deb-30ad-476a-88e4-983c09558bd8" (UID: "94b20deb-30ad-476a-88e4-983c09558bd8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.470010 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb1a8606-74e2-4480-8c23-45610b2761f9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fb1a8606-74e2-4480-8c23-45610b2761f9" (UID: "fb1a8606-74e2-4480-8c23-45610b2761f9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.470396 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/94b20deb-30ad-476a-88e4-983c09558bd8-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.470426 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb1a8606-74e2-4480-8c23-45610b2761f9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.477044 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb1a8606-74e2-4480-8c23-45610b2761f9-kube-api-access-x9828" (OuterVolumeSpecName: "kube-api-access-x9828") pod "fb1a8606-74e2-4480-8c23-45610b2761f9" (UID: "fb1a8606-74e2-4480-8c23-45610b2761f9"). InnerVolumeSpecName "kube-api-access-x9828". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.478155 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/94b20deb-30ad-476a-88e4-983c09558bd8-kube-api-access-9n556" (OuterVolumeSpecName: "kube-api-access-9n556") pod "94b20deb-30ad-476a-88e4-983c09558bd8" (UID: "94b20deb-30ad-476a-88e4-983c09558bd8"). InnerVolumeSpecName "kube-api-access-9n556". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.572292 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x9828\" (UniqueName: \"kubernetes.io/projected/fb1a8606-74e2-4480-8c23-45610b2761f9-kube-api-access-x9828\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.572332 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n556\" (UniqueName: \"kubernetes.io/projected/94b20deb-30ad-476a-88e4-983c09558bd8-kube-api-access-9n556\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.877070 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-kvm86" event={"ID":"fb1a8606-74e2-4480-8c23-45610b2761f9","Type":"ContainerDied","Data":"369a7d41451ceb3fde529558961b4c17dd289b1f50af26902b23df03095d11b2"} Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.877128 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="369a7d41451ceb3fde529558961b4c17dd289b1f50af26902b23df03095d11b2" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.877239 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-kvm86" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.879397 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-93db-account-create-update-5xvw8" event={"ID":"94b20deb-30ad-476a-88e4-983c09558bd8","Type":"ContainerDied","Data":"4fce07fafce4c16a5c00736d0d5ac190ad46982b21ba79cc18e6c4a61370c47b"} Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.879419 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4fce07fafce4c16a5c00736d0d5ac190ad46982b21ba79cc18e6c4a61370c47b" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.879480 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-93db-account-create-update-5xvw8" Nov 26 17:02:09 crc kubenswrapper[5010]: I1126 17:02:09.903983 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:02:09 crc kubenswrapper[5010]: E1126 17:02:09.904439 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.502393 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-r56n8"] Nov 26 17:02:11 crc kubenswrapper[5010]: E1126 17:02:11.502952 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb1a8606-74e2-4480-8c23-45610b2761f9" containerName="mariadb-database-create" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.502964 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb1a8606-74e2-4480-8c23-45610b2761f9" containerName="mariadb-database-create" Nov 26 17:02:11 crc kubenswrapper[5010]: E1126 17:02:11.503000 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="94b20deb-30ad-476a-88e4-983c09558bd8" containerName="mariadb-account-create-update" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.503006 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="94b20deb-30ad-476a-88e4-983c09558bd8" containerName="mariadb-account-create-update" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.503152 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb1a8606-74e2-4480-8c23-45610b2761f9" containerName="mariadb-database-create" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.503173 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="94b20deb-30ad-476a-88e4-983c09558bd8" containerName="mariadb-account-create-update" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.503654 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.505776 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7gmwq" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.506516 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.512301 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-r56n8"] Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.606815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-combined-ca-bundle\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.607258 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-db-sync-config-data\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.607386 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6v6p\" (UniqueName: \"kubernetes.io/projected/50cda769-1fdf-4bf8-9f93-1ac966b885ab-kube-api-access-p6v6p\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.709471 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-combined-ca-bundle\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.709805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-db-sync-config-data\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.709858 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6v6p\" (UniqueName: \"kubernetes.io/projected/50cda769-1fdf-4bf8-9f93-1ac966b885ab-kube-api-access-p6v6p\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.717473 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-db-sync-config-data\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.717647 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-combined-ca-bundle\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.728766 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6v6p\" (UniqueName: \"kubernetes.io/projected/50cda769-1fdf-4bf8-9f93-1ac966b885ab-kube-api-access-p6v6p\") pod \"barbican-db-sync-r56n8\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:11 crc kubenswrapper[5010]: I1126 17:02:11.821949 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:12 crc kubenswrapper[5010]: I1126 17:02:12.314007 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-r56n8"] Nov 26 17:02:12 crc kubenswrapper[5010]: I1126 17:02:12.906519 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r56n8" event={"ID":"50cda769-1fdf-4bf8-9f93-1ac966b885ab","Type":"ContainerStarted","Data":"ab3bf55cbefcc6c1b5b29a6a8cee042efff1b282e105f6f8745610a99cc06ae7"} Nov 26 17:02:12 crc kubenswrapper[5010]: I1126 17:02:12.907144 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r56n8" event={"ID":"50cda769-1fdf-4bf8-9f93-1ac966b885ab","Type":"ContainerStarted","Data":"bf3003b9c8dc24c3c702dd175517e4f3a4de878a4b6da799fc4f7e7d9e439f5e"} Nov 26 17:02:12 crc kubenswrapper[5010]: I1126 17:02:12.938197 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-r56n8" podStartSLOduration=1.93817731 podStartE2EDuration="1.93817731s" podCreationTimestamp="2025-11-26 17:02:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:12.927432563 +0000 UTC m=+5753.718149721" watchObservedRunningTime="2025-11-26 17:02:12.93817731 +0000 UTC m=+5753.728894458" Nov 26 17:02:15 crc kubenswrapper[5010]: I1126 17:02:15.934140 5010 generic.go:334] "Generic (PLEG): container finished" podID="50cda769-1fdf-4bf8-9f93-1ac966b885ab" containerID="ab3bf55cbefcc6c1b5b29a6a8cee042efff1b282e105f6f8745610a99cc06ae7" exitCode=0 Nov 26 17:02:15 crc kubenswrapper[5010]: I1126 17:02:15.934775 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r56n8" event={"ID":"50cda769-1fdf-4bf8-9f93-1ac966b885ab","Type":"ContainerDied","Data":"ab3bf55cbefcc6c1b5b29a6a8cee042efff1b282e105f6f8745610a99cc06ae7"} Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.308028 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.488016 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-db-sync-config-data\") pod \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.488142 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-combined-ca-bundle\") pod \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.488224 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6v6p\" (UniqueName: \"kubernetes.io/projected/50cda769-1fdf-4bf8-9f93-1ac966b885ab-kube-api-access-p6v6p\") pod \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\" (UID: \"50cda769-1fdf-4bf8-9f93-1ac966b885ab\") " Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.495449 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "50cda769-1fdf-4bf8-9f93-1ac966b885ab" (UID: "50cda769-1fdf-4bf8-9f93-1ac966b885ab"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.496945 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50cda769-1fdf-4bf8-9f93-1ac966b885ab-kube-api-access-p6v6p" (OuterVolumeSpecName: "kube-api-access-p6v6p") pod "50cda769-1fdf-4bf8-9f93-1ac966b885ab" (UID: "50cda769-1fdf-4bf8-9f93-1ac966b885ab"). InnerVolumeSpecName "kube-api-access-p6v6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.513033 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "50cda769-1fdf-4bf8-9f93-1ac966b885ab" (UID: "50cda769-1fdf-4bf8-9f93-1ac966b885ab"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.590070 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.590096 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6v6p\" (UniqueName: \"kubernetes.io/projected/50cda769-1fdf-4bf8-9f93-1ac966b885ab-kube-api-access-p6v6p\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.590108 5010 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/50cda769-1fdf-4bf8-9f93-1ac966b885ab-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.950608 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-r56n8" event={"ID":"50cda769-1fdf-4bf8-9f93-1ac966b885ab","Type":"ContainerDied","Data":"bf3003b9c8dc24c3c702dd175517e4f3a4de878a4b6da799fc4f7e7d9e439f5e"} Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.950642 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bf3003b9c8dc24c3c702dd175517e4f3a4de878a4b6da799fc4f7e7d9e439f5e" Nov 26 17:02:17 crc kubenswrapper[5010]: I1126 17:02:17.950740 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-r56n8" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.302174 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-68c7b56cb5-899x5"] Nov 26 17:02:18 crc kubenswrapper[5010]: E1126 17:02:18.303371 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50cda769-1fdf-4bf8-9f93-1ac966b885ab" containerName="barbican-db-sync" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.303394 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="50cda769-1fdf-4bf8-9f93-1ac966b885ab" containerName="barbican-db-sync" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.303868 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="50cda769-1fdf-4bf8-9f93-1ac966b885ab" containerName="barbican-db-sync" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.306658 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.334999 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.335397 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.335601 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-7gmwq" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.335802 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6d77f6958-2br52"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.340584 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.345455 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.385460 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6d77f6958-2br52"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.419517 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-68c7b56cb5-899x5"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.420580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c0404c5-4d78-4319-819d-97858c02ef0e-logs\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.422160 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrh8s\" (UniqueName: \"kubernetes.io/projected/9c0404c5-4d78-4319-819d-97858c02ef0e-kube-api-access-zrh8s\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.422284 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-config-data\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.422477 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-combined-ca-bundle\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.422663 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-config-data-custom\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.457878 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7546845d6c-qp2tb"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.459789 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.472198 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7546845d6c-qp2tb"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.523947 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c0404c5-4d78-4319-819d-97858c02ef0e-logs\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.523988 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrh8s\" (UniqueName: \"kubernetes.io/projected/9c0404c5-4d78-4319-819d-97858c02ef0e-kube-api-access-zrh8s\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524015 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-config-data\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524062 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-config-data\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524103 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-combined-ca-bundle\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524136 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2f4g7\" (UniqueName: \"kubernetes.io/projected/d6098b4d-083b-4c62-942d-e5fc84af0084-kube-api-access-2f4g7\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524165 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6098b4d-083b-4c62-942d-e5fc84af0084-logs\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524185 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-config-data-custom\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 
17:02:18.524220 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-combined-ca-bundle\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524252 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-config-data-custom\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.524605 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9c0404c5-4d78-4319-819d-97858c02ef0e-logs\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.531672 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-config-data\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.532204 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-config-data-custom\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.532662 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9c0404c5-4d78-4319-819d-97858c02ef0e-combined-ca-bundle\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.545009 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrh8s\" (UniqueName: \"kubernetes.io/projected/9c0404c5-4d78-4319-819d-97858c02ef0e-kube-api-access-zrh8s\") pod \"barbican-worker-68c7b56cb5-899x5\" (UID: \"9c0404c5-4d78-4319-819d-97858c02ef0e\") " pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.578851 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5dbf4fb94-q764z"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.580226 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.583431 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.600954 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5dbf4fb94-q764z"] Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628751 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6098b4d-083b-4c62-942d-e5fc84af0084-logs\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628811 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfgtn\" (UniqueName: \"kubernetes.io/projected/2489d4d4-6f5b-466c-9dda-b253dfc9912b-kube-api-access-pfgtn\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628836 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-dns-svc\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628877 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-nb\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628905 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-config\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628932 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-combined-ca-bundle\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.628981 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-combined-ca-bundle\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.629036 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-config-data-custom\") pod 
\"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.629068 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data-custom\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.629077 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6098b4d-083b-4c62-942d-e5fc84af0084-logs\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.629097 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.630343 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44317e44-3a3c-4773-8c44-8b33818d1a58-logs\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.630444 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqp8m\" (UniqueName: \"kubernetes.io/projected/44317e44-3a3c-4773-8c44-8b33818d1a58-kube-api-access-jqp8m\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.630565 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-config-data\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.630670 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2f4g7\" (UniqueName: \"kubernetes.io/projected/d6098b4d-083b-4c62-942d-e5fc84af0084-kube-api-access-2f4g7\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.630733 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-sb\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.631987 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-config-data-custom\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.632104 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-combined-ca-bundle\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.636928 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6098b4d-083b-4c62-942d-e5fc84af0084-config-data\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.647190 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2f4g7\" (UniqueName: \"kubernetes.io/projected/d6098b4d-083b-4c62-942d-e5fc84af0084-kube-api-access-2f4g7\") pod \"barbican-keystone-listener-6d77f6958-2br52\" (UID: \"d6098b4d-083b-4c62-942d-e5fc84af0084\") " pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.655288 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-68c7b56cb5-899x5" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.673252 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6d77f6958-2br52" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.731351 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-sb\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.733753 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfgtn\" (UniqueName: \"kubernetes.io/projected/2489d4d4-6f5b-466c-9dda-b253dfc9912b-kube-api-access-pfgtn\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.733786 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-dns-svc\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.733853 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-nb\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.733887 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-config\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.733942 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-combined-ca-bundle\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.734014 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data-custom\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.734060 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.734092 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44317e44-3a3c-4773-8c44-8b33818d1a58-logs\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 
26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.734134 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqp8m\" (UniqueName: \"kubernetes.io/projected/44317e44-3a3c-4773-8c44-8b33818d1a58-kube-api-access-jqp8m\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.733595 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-sb\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.735566 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-dns-svc\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.736228 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-nb\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.738100 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-config\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.741120 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44317e44-3a3c-4773-8c44-8b33818d1a58-logs\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.745452 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data-custom\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.745868 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-combined-ca-bundle\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.747065 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.757513 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-pfgtn\" (UniqueName: \"kubernetes.io/projected/2489d4d4-6f5b-466c-9dda-b253dfc9912b-kube-api-access-pfgtn\") pod \"dnsmasq-dns-7546845d6c-qp2tb\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.758203 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqp8m\" (UniqueName: \"kubernetes.io/projected/44317e44-3a3c-4773-8c44-8b33818d1a58-kube-api-access-jqp8m\") pod \"barbican-api-5dbf4fb94-q764z\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.779462 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:18 crc kubenswrapper[5010]: I1126 17:02:18.790181 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:19 crc kubenswrapper[5010]: I1126 17:02:19.184075 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-68c7b56cb5-899x5"] Nov 26 17:02:19 crc kubenswrapper[5010]: I1126 17:02:19.293574 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6d77f6958-2br52"] Nov 26 17:02:19 crc kubenswrapper[5010]: W1126 17:02:19.297608 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd6098b4d_083b_4c62_942d_e5fc84af0084.slice/crio-46e40c536025aa9b424814aa994a27bbf556446b5f6a815076d3ee69f9e69e0d WatchSource:0}: Error finding container 46e40c536025aa9b424814aa994a27bbf556446b5f6a815076d3ee69f9e69e0d: Status 404 returned error can't find the container with id 46e40c536025aa9b424814aa994a27bbf556446b5f6a815076d3ee69f9e69e0d Nov 26 17:02:19 crc kubenswrapper[5010]: W1126 17:02:19.373968 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44317e44_3a3c_4773_8c44_8b33818d1a58.slice/crio-06a67e0752cf67997f7ee0954971bb1a03ab4805231301e7f5bb734ff38276e0 WatchSource:0}: Error finding container 06a67e0752cf67997f7ee0954971bb1a03ab4805231301e7f5bb734ff38276e0: Status 404 returned error can't find the container with id 06a67e0752cf67997f7ee0954971bb1a03ab4805231301e7f5bb734ff38276e0 Nov 26 17:02:19 crc kubenswrapper[5010]: I1126 17:02:19.376825 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5dbf4fb94-q764z"] Nov 26 17:02:19 crc kubenswrapper[5010]: I1126 17:02:19.461305 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7546845d6c-qp2tb"] Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.000173 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6d77f6958-2br52" event={"ID":"d6098b4d-083b-4c62-942d-e5fc84af0084","Type":"ContainerStarted","Data":"670510733e7d87acba3aad4d8f4068878f47079edf04ce2f41c5ee5111fedba2"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.000731 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6d77f6958-2br52" event={"ID":"d6098b4d-083b-4c62-942d-e5fc84af0084","Type":"ContainerStarted","Data":"6cabef46fc6ef2458faa18e8fc1a78176ac6b46c87337ba615ba7a3ac951cc07"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.000744 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-keystone-listener-6d77f6958-2br52" event={"ID":"d6098b4d-083b-4c62-942d-e5fc84af0084","Type":"ContainerStarted","Data":"46e40c536025aa9b424814aa994a27bbf556446b5f6a815076d3ee69f9e69e0d"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.026905 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-68c7b56cb5-899x5" event={"ID":"9c0404c5-4d78-4319-819d-97858c02ef0e","Type":"ContainerStarted","Data":"a470d1f0a641ab44b4260ab47cd335899783100e021e3e29ec356d969d335cfa"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.026951 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-68c7b56cb5-899x5" event={"ID":"9c0404c5-4d78-4319-819d-97858c02ef0e","Type":"ContainerStarted","Data":"4067891e6b0d6c3e5361887ee34b13373d5bfa15dfff7d1241bc71c2c9645f00"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.026961 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-68c7b56cb5-899x5" event={"ID":"9c0404c5-4d78-4319-819d-97858c02ef0e","Type":"ContainerStarted","Data":"688e02f065fd9c5cac9e08a82a020de9af76e168ed29b682c3076a1a2e972cb8"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.035222 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dbf4fb94-q764z" event={"ID":"44317e44-3a3c-4773-8c44-8b33818d1a58","Type":"ContainerStarted","Data":"8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.035268 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dbf4fb94-q764z" event={"ID":"44317e44-3a3c-4773-8c44-8b33818d1a58","Type":"ContainerStarted","Data":"80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.035279 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dbf4fb94-q764z" event={"ID":"44317e44-3a3c-4773-8c44-8b33818d1a58","Type":"ContainerStarted","Data":"06a67e0752cf67997f7ee0954971bb1a03ab4805231301e7f5bb734ff38276e0"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.036055 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.036081 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.040922 5010 generic.go:334] "Generic (PLEG): container finished" podID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerID="c199513abd3b850396eeb0c05485b757b7f3232cc79b9e3e23076744c0285f9b" exitCode=0 Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.040956 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" event={"ID":"2489d4d4-6f5b-466c-9dda-b253dfc9912b","Type":"ContainerDied","Data":"c199513abd3b850396eeb0c05485b757b7f3232cc79b9e3e23076744c0285f9b"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.040974 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" event={"ID":"2489d4d4-6f5b-466c-9dda-b253dfc9912b","Type":"ContainerStarted","Data":"a1197758f4f88880de06842168cf8064b3930d1117c5b4290b956246f145909a"} Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.057379 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6d77f6958-2br52" 
podStartSLOduration=2.05735753 podStartE2EDuration="2.05735753s" podCreationTimestamp="2025-11-26 17:02:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:20.047150446 +0000 UTC m=+5760.837867594" watchObservedRunningTime="2025-11-26 17:02:20.05735753 +0000 UTC m=+5760.848074678" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.081666 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-68c7b56cb5-899x5" podStartSLOduration=2.081643264 podStartE2EDuration="2.081643264s" podCreationTimestamp="2025-11-26 17:02:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:20.076895006 +0000 UTC m=+5760.867612164" watchObservedRunningTime="2025-11-26 17:02:20.081643264 +0000 UTC m=+5760.872360422" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.175254 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5dbf4fb94-q764z" podStartSLOduration=2.175165852 podStartE2EDuration="2.175165852s" podCreationTimestamp="2025-11-26 17:02:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:20.099786146 +0000 UTC m=+5760.890503304" watchObservedRunningTime="2025-11-26 17:02:20.175165852 +0000 UTC m=+5760.965883030" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.557811 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7545687684-8xwxg"] Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.560186 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.562806 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.570405 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7545687684-8xwxg"] Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.571348 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.591979 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff07778f-8a03-4601-8581-e66658b53274-logs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.592049 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-combined-ca-bundle\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.592087 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s46xc\" (UniqueName: \"kubernetes.io/projected/ff07778f-8a03-4601-8581-e66658b53274-kube-api-access-s46xc\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.592154 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-config-data\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.592477 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-config-data-custom\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.592659 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-internal-tls-certs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.592826 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-public-tls-certs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694203 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff07778f-8a03-4601-8581-e66658b53274-logs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694254 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-combined-ca-bundle\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694284 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s46xc\" (UniqueName: \"kubernetes.io/projected/ff07778f-8a03-4601-8581-e66658b53274-kube-api-access-s46xc\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694307 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-config-data\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694373 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-config-data-custom\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694408 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-internal-tls-certs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694444 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-public-tls-certs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.694773 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ff07778f-8a03-4601-8581-e66658b53274-logs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.699555 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-public-tls-certs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.703194 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-internal-tls-certs\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.704409 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-config-data\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.711401 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-config-data-custom\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.712195 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff07778f-8a03-4601-8581-e66658b53274-combined-ca-bundle\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.714760 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s46xc\" (UniqueName: \"kubernetes.io/projected/ff07778f-8a03-4601-8581-e66658b53274-kube-api-access-s46xc\") pod \"barbican-api-7545687684-8xwxg\" (UID: \"ff07778f-8a03-4601-8581-e66658b53274\") " pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:20 crc kubenswrapper[5010]: I1126 17:02:20.899556 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:21 crc kubenswrapper[5010]: I1126 17:02:21.065336 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" event={"ID":"2489d4d4-6f5b-466c-9dda-b253dfc9912b","Type":"ContainerStarted","Data":"4de2c5af96140abdae15985b70ab60fb8cc59491adbf8421572de0a86807fc50"} Nov 26 17:02:21 crc kubenswrapper[5010]: I1126 17:02:21.065774 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:21 crc kubenswrapper[5010]: I1126 17:02:21.089401 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" podStartSLOduration=3.089375912 podStartE2EDuration="3.089375912s" podCreationTimestamp="2025-11-26 17:02:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:21.083117386 +0000 UTC m=+5761.873834544" watchObservedRunningTime="2025-11-26 17:02:21.089375912 +0000 UTC m=+5761.880093070" Nov 26 17:02:21 crc kubenswrapper[5010]: I1126 17:02:21.327671 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7545687684-8xwxg"] Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.076283 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7545687684-8xwxg" event={"ID":"ff07778f-8a03-4601-8581-e66658b53274","Type":"ContainerStarted","Data":"7e2d4d309783123646e509733a7e1f6af4bf884724eec75428b883dcb14c71b5"} Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.076617 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7545687684-8xwxg" event={"ID":"ff07778f-8a03-4601-8581-e66658b53274","Type":"ContainerStarted","Data":"1dd57ddfc3790e8148f2353ce014f7155bd34d2b7ca56f355963fbfd876d2b96"} Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.076634 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7545687684-8xwxg" event={"ID":"ff07778f-8a03-4601-8581-e66658b53274","Type":"ContainerStarted","Data":"597fa2f66bc8052e909353ce122a4865816948dac6e07cd2f06d64b367f47ac9"} Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.077115 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.077150 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.097276 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7545687684-8xwxg" podStartSLOduration=2.097258023 podStartE2EDuration="2.097258023s" podCreationTimestamp="2025-11-26 17:02:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:22.092885074 +0000 UTC m=+5762.883602242" watchObservedRunningTime="2025-11-26 17:02:22.097258023 +0000 UTC m=+5762.887975171" Nov 26 17:02:22 crc kubenswrapper[5010]: I1126 17:02:22.891547 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:02:22 crc kubenswrapper[5010]: E1126 17:02:22.892229 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:02:28 crc kubenswrapper[5010]: I1126 17:02:28.791856 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:02:28 crc kubenswrapper[5010]: I1126 17:02:28.851066 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d9b4bcd5-swhgr"] Nov 26 17:02:28 crc kubenswrapper[5010]: I1126 17:02:28.854617 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerName="dnsmasq-dns" containerID="cri-o://c35b879200ae7deacde750d0c4d9662644a9f5cead1e81dcfb2dbcf9f8fbf84e" gracePeriod=10 Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.142804 5010 generic.go:334] "Generic (PLEG): container finished" podID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerID="c35b879200ae7deacde750d0c4d9662644a9f5cead1e81dcfb2dbcf9f8fbf84e" exitCode=0 Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.142850 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" event={"ID":"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d","Type":"ContainerDied","Data":"c35b879200ae7deacde750d0c4d9662644a9f5cead1e81dcfb2dbcf9f8fbf84e"} Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.463590 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.614819 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-dns-svc\") pod \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.615027 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kktgr\" (UniqueName: \"kubernetes.io/projected/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-kube-api-access-kktgr\") pod \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.615126 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-nb\") pod \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.615168 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-sb\") pod \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.615214 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-config\") pod \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\" (UID: \"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d\") " Nov 26 17:02:29 crc 
kubenswrapper[5010]: I1126 17:02:29.620862 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-kube-api-access-kktgr" (OuterVolumeSpecName: "kube-api-access-kktgr") pod "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" (UID: "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d"). InnerVolumeSpecName "kube-api-access-kktgr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.659065 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-config" (OuterVolumeSpecName: "config") pod "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" (UID: "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.660143 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" (UID: "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.666162 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" (UID: "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.667628 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" (UID: "8dbf07e2-4d29-427e-acfb-6e607e5d6f9d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.716842 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.717083 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.717140 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.717189 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:29 crc kubenswrapper[5010]: I1126 17:02:29.717238 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kktgr\" (UniqueName: \"kubernetes.io/projected/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d-kube-api-access-kktgr\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.153249 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" event={"ID":"8dbf07e2-4d29-427e-acfb-6e607e5d6f9d","Type":"ContainerDied","Data":"2794a5a467ce31b8c0fe5da0b554ecab1cce51ee6dd7da8b9071738840751f78"} Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.153637 5010 scope.go:117] "RemoveContainer" containerID="c35b879200ae7deacde750d0c4d9662644a9f5cead1e81dcfb2dbcf9f8fbf84e" Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.153307 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d9b4bcd5-swhgr" Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.181016 5010 scope.go:117] "RemoveContainer" containerID="7c4f8413a2af61b16b13663a350314a5104f11968f9b9a9ec80d3f58dee0be28" Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.190894 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d9b4bcd5-swhgr"] Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.204821 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d9b4bcd5-swhgr"] Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.344687 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:30 crc kubenswrapper[5010]: I1126 17:02:30.595479 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:31 crc kubenswrapper[5010]: I1126 17:02:31.904685 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" path="/var/lib/kubelet/pods/8dbf07e2-4d29-427e-acfb-6e607e5d6f9d/volumes" Nov 26 17:02:32 crc kubenswrapper[5010]: I1126 17:02:32.312879 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:32 crc kubenswrapper[5010]: I1126 17:02:32.379627 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7545687684-8xwxg" Nov 26 17:02:32 crc kubenswrapper[5010]: I1126 17:02:32.448161 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5dbf4fb94-q764z"] Nov 26 17:02:32 crc kubenswrapper[5010]: I1126 17:02:32.448395 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5dbf4fb94-q764z" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api-log" containerID="cri-o://80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b" gracePeriod=30 Nov 26 17:02:32 crc kubenswrapper[5010]: I1126 17:02:32.448525 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5dbf4fb94-q764z" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api" containerID="cri-o://8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d" gracePeriod=30 Nov 26 17:02:33 crc kubenswrapper[5010]: I1126 17:02:33.198453 5010 generic.go:334] "Generic (PLEG): container finished" podID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerID="80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b" exitCode=143 Nov 26 17:02:33 crc kubenswrapper[5010]: I1126 17:02:33.198587 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dbf4fb94-q764z" event={"ID":"44317e44-3a3c-4773-8c44-8b33818d1a58","Type":"ContainerDied","Data":"80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b"} Nov 26 17:02:33 crc kubenswrapper[5010]: I1126 17:02:33.891553 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:02:33 crc kubenswrapper[5010]: E1126 17:02:33.892166 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:02:35 crc kubenswrapper[5010]: I1126 17:02:35.621376 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5dbf4fb94-q764z" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.1.50:9311/healthcheck\": read tcp 10.217.0.2:46838->10.217.1.50:9311: read: connection reset by peer" Nov 26 17:02:35 crc kubenswrapper[5010]: I1126 17:02:35.621396 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5dbf4fb94-q764z" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.1.50:9311/healthcheck\": read tcp 10.217.0.2:46836->10.217.1.50:9311: read: connection reset by peer" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.009149 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.151050 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data\") pod \"44317e44-3a3c-4773-8c44-8b33818d1a58\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.151100 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data-custom\") pod \"44317e44-3a3c-4773-8c44-8b33818d1a58\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.151159 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44317e44-3a3c-4773-8c44-8b33818d1a58-logs\") pod \"44317e44-3a3c-4773-8c44-8b33818d1a58\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.151179 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jqp8m\" (UniqueName: \"kubernetes.io/projected/44317e44-3a3c-4773-8c44-8b33818d1a58-kube-api-access-jqp8m\") pod \"44317e44-3a3c-4773-8c44-8b33818d1a58\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.151333 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-combined-ca-bundle\") pod \"44317e44-3a3c-4773-8c44-8b33818d1a58\" (UID: \"44317e44-3a3c-4773-8c44-8b33818d1a58\") " Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.152211 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44317e44-3a3c-4773-8c44-8b33818d1a58-logs" (OuterVolumeSpecName: "logs") pod "44317e44-3a3c-4773-8c44-8b33818d1a58" (UID: "44317e44-3a3c-4773-8c44-8b33818d1a58"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.174752 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "44317e44-3a3c-4773-8c44-8b33818d1a58" (UID: "44317e44-3a3c-4773-8c44-8b33818d1a58"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.174837 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44317e44-3a3c-4773-8c44-8b33818d1a58-kube-api-access-jqp8m" (OuterVolumeSpecName: "kube-api-access-jqp8m") pod "44317e44-3a3c-4773-8c44-8b33818d1a58" (UID: "44317e44-3a3c-4773-8c44-8b33818d1a58"). InnerVolumeSpecName "kube-api-access-jqp8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.177914 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "44317e44-3a3c-4773-8c44-8b33818d1a58" (UID: "44317e44-3a3c-4773-8c44-8b33818d1a58"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.195721 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data" (OuterVolumeSpecName: "config-data") pod "44317e44-3a3c-4773-8c44-8b33818d1a58" (UID: "44317e44-3a3c-4773-8c44-8b33818d1a58"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.229702 5010 generic.go:334] "Generic (PLEG): container finished" podID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerID="8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d" exitCode=0 Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.229768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dbf4fb94-q764z" event={"ID":"44317e44-3a3c-4773-8c44-8b33818d1a58","Type":"ContainerDied","Data":"8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d"} Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.229802 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5dbf4fb94-q764z" event={"ID":"44317e44-3a3c-4773-8c44-8b33818d1a58","Type":"ContainerDied","Data":"06a67e0752cf67997f7ee0954971bb1a03ab4805231301e7f5bb734ff38276e0"} Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.229822 5010 scope.go:117] "RemoveContainer" containerID="8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.229768 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5dbf4fb94-q764z" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.255494 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.255547 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.255562 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44317e44-3a3c-4773-8c44-8b33818d1a58-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.255575 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jqp8m\" (UniqueName: \"kubernetes.io/projected/44317e44-3a3c-4773-8c44-8b33818d1a58-kube-api-access-jqp8m\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.255587 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/44317e44-3a3c-4773-8c44-8b33818d1a58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.257780 5010 scope.go:117] "RemoveContainer" containerID="80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.264355 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5dbf4fb94-q764z"] Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.272402 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5dbf4fb94-q764z"] Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.275989 5010 scope.go:117] "RemoveContainer" containerID="8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d" Nov 26 17:02:36 crc kubenswrapper[5010]: E1126 17:02:36.276379 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d\": container with ID starting with 8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d not found: ID does not exist" containerID="8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.276426 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d"} err="failed to get container status \"8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d\": rpc error: code = NotFound desc = could not find container \"8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d\": container with ID starting with 8984d487d36dfa792b2ca51651f0fea1a3de581ee3296727217a4c1eddf73f1d not found: ID does not exist" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.276456 5010 scope.go:117] "RemoveContainer" containerID="80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b" Nov 26 17:02:36 crc kubenswrapper[5010]: E1126 17:02:36.276784 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b\": container with ID starting with 80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b not found: ID does not exist" containerID="80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b" Nov 26 17:02:36 crc kubenswrapper[5010]: I1126 17:02:36.276827 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b"} err="failed to get container status \"80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b\": rpc error: code = NotFound desc = could not find container \"80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b\": container with ID starting with 80cbefb9fa09ce2f6bec380e3d2e266ef674f7a5bbef017e0a09f7a6c793035b not found: ID does not exist" Nov 26 17:02:37 crc kubenswrapper[5010]: I1126 17:02:37.902401 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" path="/var/lib/kubelet/pods/44317e44-3a3c-4773-8c44-8b33818d1a58/volumes" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.333137 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-t2jh7"] Nov 26 17:02:39 crc kubenswrapper[5010]: E1126 17:02:39.333937 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.333952 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api" Nov 26 17:02:39 crc kubenswrapper[5010]: E1126 17:02:39.333973 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerName="dnsmasq-dns" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.333979 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerName="dnsmasq-dns" Nov 26 17:02:39 crc kubenswrapper[5010]: E1126 17:02:39.333989 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api-log" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.333996 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api-log" Nov 26 17:02:39 crc kubenswrapper[5010]: E1126 17:02:39.334004 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerName="init" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.334009 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerName="init" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.334177 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api-log" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.334194 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dbf07e2-4d29-427e-acfb-6e607e5d6f9d" containerName="dnsmasq-dns" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.334206 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="44317e44-3a3c-4773-8c44-8b33818d1a58" containerName="barbican-api" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.334909 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.344785 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-t2jh7"] Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.416846 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-b951-account-create-update-rck95"] Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.418041 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.423568 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.427865 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b951-account-create-update-rck95"] Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.508747 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-chg8l\" (UniqueName: \"kubernetes.io/projected/45abb0ea-b89d-4041-9371-6c1433aa3123-kube-api-access-chg8l\") pod \"neutron-db-create-t2jh7\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.509009 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-operator-scripts\") pod \"neutron-b951-account-create-update-rck95\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.509099 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abb0ea-b89d-4041-9371-6c1433aa3123-operator-scripts\") pod \"neutron-db-create-t2jh7\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.509207 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4prg5\" (UniqueName: \"kubernetes.io/projected/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-kube-api-access-4prg5\") pod \"neutron-b951-account-create-update-rck95\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.610612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abb0ea-b89d-4041-9371-6c1433aa3123-operator-scripts\") pod \"neutron-db-create-t2jh7\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.610760 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4prg5\" (UniqueName: \"kubernetes.io/projected/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-kube-api-access-4prg5\") pod \"neutron-b951-account-create-update-rck95\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.610828 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-chg8l\" (UniqueName: \"kubernetes.io/projected/45abb0ea-b89d-4041-9371-6c1433aa3123-kube-api-access-chg8l\") pod \"neutron-db-create-t2jh7\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.610897 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-operator-scripts\") pod \"neutron-b951-account-create-update-rck95\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.611736 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-operator-scripts\") pod \"neutron-b951-account-create-update-rck95\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.611955 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abb0ea-b89d-4041-9371-6c1433aa3123-operator-scripts\") pod \"neutron-db-create-t2jh7\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.631391 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-chg8l\" (UniqueName: \"kubernetes.io/projected/45abb0ea-b89d-4041-9371-6c1433aa3123-kube-api-access-chg8l\") pod \"neutron-db-create-t2jh7\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.631419 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4prg5\" (UniqueName: \"kubernetes.io/projected/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-kube-api-access-4prg5\") pod \"neutron-b951-account-create-update-rck95\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.671229 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:39 crc kubenswrapper[5010]: I1126 17:02:39.735998 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:40 crc kubenswrapper[5010]: I1126 17:02:40.173999 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-t2jh7"] Nov 26 17:02:40 crc kubenswrapper[5010]: I1126 17:02:40.268760 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-t2jh7" event={"ID":"45abb0ea-b89d-4041-9371-6c1433aa3123","Type":"ContainerStarted","Data":"05928856fc5572165405a706aaa203f12cb101c67338bd0cd28ba7ae4364afa4"} Nov 26 17:02:40 crc kubenswrapper[5010]: I1126 17:02:40.269960 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-b951-account-create-update-rck95"] Nov 26 17:02:40 crc kubenswrapper[5010]: W1126 17:02:40.272225 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod406b38b3_8408_49c9_88ee_bf1c2f8a85d9.slice/crio-9d4acf4e18edc61603d6bf28ac71911b225255fed6f56747e986b814c81ccd6b WatchSource:0}: Error finding container 9d4acf4e18edc61603d6bf28ac71911b225255fed6f56747e986b814c81ccd6b: Status 404 returned error can't find the container with id 9d4acf4e18edc61603d6bf28ac71911b225255fed6f56747e986b814c81ccd6b Nov 26 17:02:41 crc kubenswrapper[5010]: I1126 17:02:41.280824 5010 generic.go:334] "Generic (PLEG): container finished" podID="406b38b3-8408-49c9-88ee-bf1c2f8a85d9" containerID="1f878451f9937ca0ca2e65f23b817b0e59ac96dfcdfdc005a1de740391f61c16" exitCode=0 Nov 26 17:02:41 crc kubenswrapper[5010]: I1126 17:02:41.280902 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b951-account-create-update-rck95" event={"ID":"406b38b3-8408-49c9-88ee-bf1c2f8a85d9","Type":"ContainerDied","Data":"1f878451f9937ca0ca2e65f23b817b0e59ac96dfcdfdc005a1de740391f61c16"} Nov 26 17:02:41 crc kubenswrapper[5010]: I1126 17:02:41.281201 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b951-account-create-update-rck95" event={"ID":"406b38b3-8408-49c9-88ee-bf1c2f8a85d9","Type":"ContainerStarted","Data":"9d4acf4e18edc61603d6bf28ac71911b225255fed6f56747e986b814c81ccd6b"} Nov 26 17:02:41 crc kubenswrapper[5010]: I1126 17:02:41.283524 5010 generic.go:334] "Generic (PLEG): container finished" podID="45abb0ea-b89d-4041-9371-6c1433aa3123" containerID="61375f021fb229dc2350ee3a57be8bea7576eff82ae49749769a6984bbb22f84" exitCode=0 Nov 26 17:02:41 crc kubenswrapper[5010]: I1126 17:02:41.283587 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-t2jh7" event={"ID":"45abb0ea-b89d-4041-9371-6c1433aa3123","Type":"ContainerDied","Data":"61375f021fb229dc2350ee3a57be8bea7576eff82ae49749769a6984bbb22f84"} Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.726605 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.738101 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.870277 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chg8l\" (UniqueName: \"kubernetes.io/projected/45abb0ea-b89d-4041-9371-6c1433aa3123-kube-api-access-chg8l\") pod \"45abb0ea-b89d-4041-9371-6c1433aa3123\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.870341 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-operator-scripts\") pod \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.870396 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abb0ea-b89d-4041-9371-6c1433aa3123-operator-scripts\") pod \"45abb0ea-b89d-4041-9371-6c1433aa3123\" (UID: \"45abb0ea-b89d-4041-9371-6c1433aa3123\") " Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.870542 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4prg5\" (UniqueName: \"kubernetes.io/projected/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-kube-api-access-4prg5\") pod \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\" (UID: \"406b38b3-8408-49c9-88ee-bf1c2f8a85d9\") " Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.870933 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/45abb0ea-b89d-4041-9371-6c1433aa3123-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "45abb0ea-b89d-4041-9371-6c1433aa3123" (UID: "45abb0ea-b89d-4041-9371-6c1433aa3123"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.871206 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "406b38b3-8408-49c9-88ee-bf1c2f8a85d9" (UID: "406b38b3-8408-49c9-88ee-bf1c2f8a85d9"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.871725 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.871750 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/45abb0ea-b89d-4041-9371-6c1433aa3123-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.883515 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45abb0ea-b89d-4041-9371-6c1433aa3123-kube-api-access-chg8l" (OuterVolumeSpecName: "kube-api-access-chg8l") pod "45abb0ea-b89d-4041-9371-6c1433aa3123" (UID: "45abb0ea-b89d-4041-9371-6c1433aa3123"). InnerVolumeSpecName "kube-api-access-chg8l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.883796 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-kube-api-access-4prg5" (OuterVolumeSpecName: "kube-api-access-4prg5") pod "406b38b3-8408-49c9-88ee-bf1c2f8a85d9" (UID: "406b38b3-8408-49c9-88ee-bf1c2f8a85d9"). InnerVolumeSpecName "kube-api-access-4prg5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.975215 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4prg5\" (UniqueName: \"kubernetes.io/projected/406b38b3-8408-49c9-88ee-bf1c2f8a85d9-kube-api-access-4prg5\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:42 crc kubenswrapper[5010]: I1126 17:02:42.975253 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chg8l\" (UniqueName: \"kubernetes.io/projected/45abb0ea-b89d-4041-9371-6c1433aa3123-kube-api-access-chg8l\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:43 crc kubenswrapper[5010]: I1126 17:02:43.307258 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-t2jh7" event={"ID":"45abb0ea-b89d-4041-9371-6c1433aa3123","Type":"ContainerDied","Data":"05928856fc5572165405a706aaa203f12cb101c67338bd0cd28ba7ae4364afa4"} Nov 26 17:02:43 crc kubenswrapper[5010]: I1126 17:02:43.307312 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="05928856fc5572165405a706aaa203f12cb101c67338bd0cd28ba7ae4364afa4" Nov 26 17:02:43 crc kubenswrapper[5010]: I1126 17:02:43.307343 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-t2jh7" Nov 26 17:02:43 crc kubenswrapper[5010]: I1126 17:02:43.317139 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-b951-account-create-update-rck95" event={"ID":"406b38b3-8408-49c9-88ee-bf1c2f8a85d9","Type":"ContainerDied","Data":"9d4acf4e18edc61603d6bf28ac71911b225255fed6f56747e986b814c81ccd6b"} Nov 26 17:02:43 crc kubenswrapper[5010]: I1126 17:02:43.317192 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9d4acf4e18edc61603d6bf28ac71911b225255fed6f56747e986b814c81ccd6b" Nov 26 17:02:43 crc kubenswrapper[5010]: I1126 17:02:43.317289 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-b951-account-create-update-rck95" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.660866 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-n8w48"] Nov 26 17:02:44 crc kubenswrapper[5010]: E1126 17:02:44.661529 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="406b38b3-8408-49c9-88ee-bf1c2f8a85d9" containerName="mariadb-account-create-update" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.661545 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="406b38b3-8408-49c9-88ee-bf1c2f8a85d9" containerName="mariadb-account-create-update" Nov 26 17:02:44 crc kubenswrapper[5010]: E1126 17:02:44.661587 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45abb0ea-b89d-4041-9371-6c1433aa3123" containerName="mariadb-database-create" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.661596 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="45abb0ea-b89d-4041-9371-6c1433aa3123" containerName="mariadb-database-create" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.661821 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="45abb0ea-b89d-4041-9371-6c1433aa3123" containerName="mariadb-database-create" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.661851 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="406b38b3-8408-49c9-88ee-bf1c2f8a85d9" containerName="mariadb-account-create-update" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.662568 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.664800 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.665041 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.665740 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2qfmw" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.673080 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-n8w48"] Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.805061 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-config\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.805297 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cddrn\" (UniqueName: \"kubernetes.io/projected/da992071-826e-44cd-83f6-2c190a5a73f6-kube-api-access-cddrn\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.805464 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-combined-ca-bundle\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc 
kubenswrapper[5010]: I1126 17:02:44.892268 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:02:44 crc kubenswrapper[5010]: E1126 17:02:44.892463 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.906646 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-config\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.906756 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cddrn\" (UniqueName: \"kubernetes.io/projected/da992071-826e-44cd-83f6-2c190a5a73f6-kube-api-access-cddrn\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.906794 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-combined-ca-bundle\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.920019 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-config\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.920051 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-combined-ca-bundle\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.922622 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cddrn\" (UniqueName: \"kubernetes.io/projected/da992071-826e-44cd-83f6-2c190a5a73f6-kube-api-access-cddrn\") pod \"neutron-db-sync-n8w48\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:44 crc kubenswrapper[5010]: I1126 17:02:44.988905 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:45 crc kubenswrapper[5010]: I1126 17:02:45.426040 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-n8w48"] Nov 26 17:02:46 crc kubenswrapper[5010]: I1126 17:02:46.345219 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n8w48" event={"ID":"da992071-826e-44cd-83f6-2c190a5a73f6","Type":"ContainerStarted","Data":"1663156357ed77d79325bdcb551b5e9315b982db39ccf9ab300103426d7c4c3b"} Nov 26 17:02:46 crc kubenswrapper[5010]: I1126 17:02:46.345767 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n8w48" event={"ID":"da992071-826e-44cd-83f6-2c190a5a73f6","Type":"ContainerStarted","Data":"6046bb38d5202a640853bd0ad216500e5d639ff027fe14504fb77b80a22154e8"} Nov 26 17:02:46 crc kubenswrapper[5010]: I1126 17:02:46.373509 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-n8w48" podStartSLOduration=2.373464708 podStartE2EDuration="2.373464708s" podCreationTimestamp="2025-11-26 17:02:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:46.366628318 +0000 UTC m=+5787.157345486" watchObservedRunningTime="2025-11-26 17:02:46.373464708 +0000 UTC m=+5787.164181896" Nov 26 17:02:50 crc kubenswrapper[5010]: I1126 17:02:50.386638 5010 generic.go:334] "Generic (PLEG): container finished" podID="da992071-826e-44cd-83f6-2c190a5a73f6" containerID="1663156357ed77d79325bdcb551b5e9315b982db39ccf9ab300103426d7c4c3b" exitCode=0 Nov 26 17:02:50 crc kubenswrapper[5010]: I1126 17:02:50.386768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n8w48" event={"ID":"da992071-826e-44cd-83f6-2c190a5a73f6","Type":"ContainerDied","Data":"1663156357ed77d79325bdcb551b5e9315b982db39ccf9ab300103426d7c4c3b"} Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.737903 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.853922 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-combined-ca-bundle\") pod \"da992071-826e-44cd-83f6-2c190a5a73f6\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.854346 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-config\") pod \"da992071-826e-44cd-83f6-2c190a5a73f6\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.854440 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cddrn\" (UniqueName: \"kubernetes.io/projected/da992071-826e-44cd-83f6-2c190a5a73f6-kube-api-access-cddrn\") pod \"da992071-826e-44cd-83f6-2c190a5a73f6\" (UID: \"da992071-826e-44cd-83f6-2c190a5a73f6\") " Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.859895 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da992071-826e-44cd-83f6-2c190a5a73f6-kube-api-access-cddrn" (OuterVolumeSpecName: "kube-api-access-cddrn") pod "da992071-826e-44cd-83f6-2c190a5a73f6" (UID: "da992071-826e-44cd-83f6-2c190a5a73f6"). InnerVolumeSpecName "kube-api-access-cddrn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.878691 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-config" (OuterVolumeSpecName: "config") pod "da992071-826e-44cd-83f6-2c190a5a73f6" (UID: "da992071-826e-44cd-83f6-2c190a5a73f6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.879900 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "da992071-826e-44cd-83f6-2c190a5a73f6" (UID: "da992071-826e-44cd-83f6-2c190a5a73f6"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.956730 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.956754 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/da992071-826e-44cd-83f6-2c190a5a73f6-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:51 crc kubenswrapper[5010]: I1126 17:02:51.956764 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cddrn\" (UniqueName: \"kubernetes.io/projected/da992071-826e-44cd-83f6-2c190a5a73f6-kube-api-access-cddrn\") on node \"crc\" DevicePath \"\"" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.413444 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-n8w48" event={"ID":"da992071-826e-44cd-83f6-2c190a5a73f6","Type":"ContainerDied","Data":"6046bb38d5202a640853bd0ad216500e5d639ff027fe14504fb77b80a22154e8"} Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.413490 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6046bb38d5202a640853bd0ad216500e5d639ff027fe14504fb77b80a22154e8" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.413951 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-n8w48" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.680873 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6b8f57cd8c-znnc4"] Nov 26 17:02:52 crc kubenswrapper[5010]: E1126 17:02:52.681561 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da992071-826e-44cd-83f6-2c190a5a73f6" containerName="neutron-db-sync" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.681591 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="da992071-826e-44cd-83f6-2c190a5a73f6" containerName="neutron-db-sync" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.681807 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="da992071-826e-44cd-83f6-2c190a5a73f6" containerName="neutron-db-sync" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.683104 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.687416 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8f57cd8c-znnc4"] Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.783817 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.783912 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srt96\" (UniqueName: \"kubernetes.io/projected/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-kube-api-access-srt96\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.783964 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.784033 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-config\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.784075 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-dns-svc\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.865810 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6dff657544-xqtrb"] Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.867960 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.874627 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.875494 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2qfmw" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.875758 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.879661 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.885793 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.888794 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-config\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.888872 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-dns-svc\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.889017 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.889135 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srt96\" (UniqueName: \"kubernetes.io/projected/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-kube-api-access-srt96\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.886878 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-sb\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.890249 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-config\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.891905 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-nb\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.893437 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-dns-svc\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.897373 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6dff657544-xqtrb"] Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.952613 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srt96\" (UniqueName: \"kubernetes.io/projected/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-kube-api-access-srt96\") pod \"dnsmasq-dns-6b8f57cd8c-znnc4\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.992323 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-httpd-config\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.992383 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc4qr\" (UniqueName: \"kubernetes.io/projected/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-kube-api-access-sc4qr\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.992954 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-ovndb-tls-certs\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.993051 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-combined-ca-bundle\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.993078 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-config\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:52 crc kubenswrapper[5010]: I1126 17:02:52.998687 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.096610 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc4qr\" (UniqueName: \"kubernetes.io/projected/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-kube-api-access-sc4qr\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.096685 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-ovndb-tls-certs\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.096753 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-combined-ca-bundle\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.096773 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-config\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.096841 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-httpd-config\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.101310 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-ovndb-tls-certs\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.102191 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-httpd-config\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.104288 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-combined-ca-bundle\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.113485 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-config\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.114527 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-sc4qr\" (UniqueName: \"kubernetes.io/projected/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-kube-api-access-sc4qr\") pod \"neutron-6dff657544-xqtrb\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.203702 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.502649 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6b8f57cd8c-znnc4"] Nov 26 17:02:53 crc kubenswrapper[5010]: I1126 17:02:53.770127 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6dff657544-xqtrb"] Nov 26 17:02:53 crc kubenswrapper[5010]: W1126 17:02:53.826793 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5ef121a_b9c3_41a5_95ce_4c9ff5a3d908.slice/crio-d0074f6dae11ae5cd3d3e3df2c6f8be58880300744eeed2a333ffa84e5d13c4f WatchSource:0}: Error finding container d0074f6dae11ae5cd3d3e3df2c6f8be58880300744eeed2a333ffa84e5d13c4f: Status 404 returned error can't find the container with id d0074f6dae11ae5cd3d3e3df2c6f8be58880300744eeed2a333ffa84e5d13c4f Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.429814 5010 generic.go:334] "Generic (PLEG): container finished" podID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerID="422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71" exitCode=0 Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.429914 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" event={"ID":"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff","Type":"ContainerDied","Data":"422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71"} Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.430205 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" event={"ID":"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff","Type":"ContainerStarted","Data":"9720876f96aa66af521f7ea4a9f8e0ee9cb3affd27935fd69a94ebf7140e9111"} Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.432087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6dff657544-xqtrb" event={"ID":"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908","Type":"ContainerStarted","Data":"c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823"} Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.432122 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6dff657544-xqtrb" event={"ID":"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908","Type":"ContainerStarted","Data":"9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be"} Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.432132 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6dff657544-xqtrb" event={"ID":"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908","Type":"ContainerStarted","Data":"d0074f6dae11ae5cd3d3e3df2c6f8be58880300744eeed2a333ffa84e5d13c4f"} Nov 26 17:02:54 crc kubenswrapper[5010]: I1126 17:02:54.432230 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.394937 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6dff657544-xqtrb" podStartSLOduration=3.394914426 podStartE2EDuration="3.394914426s" 
podCreationTimestamp="2025-11-26 17:02:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:54.488327125 +0000 UTC m=+5795.279044283" watchObservedRunningTime="2025-11-26 17:02:55.394914426 +0000 UTC m=+5796.185631574" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.402062 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-86bf4f9bd7-vb726"] Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.403871 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: W1126 17:02:55.407258 5010 reflector.go:561] object-"openstack"/"cert-neutron-internal-svc": failed to list *v1.Secret: secrets "cert-neutron-internal-svc" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 26 17:02:55 crc kubenswrapper[5010]: E1126 17:02:55.407307 5010 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"cert-neutron-internal-svc\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-neutron-internal-svc\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 17:02:55 crc kubenswrapper[5010]: W1126 17:02:55.407472 5010 reflector.go:561] object-"openstack"/"cert-neutron-public-svc": failed to list *v1.Secret: secrets "cert-neutron-public-svc" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 26 17:02:55 crc kubenswrapper[5010]: E1126 17:02:55.407495 5010 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"cert-neutron-public-svc\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-neutron-public-svc\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.414302 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86bf4f9bd7-vb726"] Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.446683 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" event={"ID":"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff","Type":"ContainerStarted","Data":"85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852"} Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.446754 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.478724 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" podStartSLOduration=3.478688301 podStartE2EDuration="3.478688301s" podCreationTimestamp="2025-11-26 17:02:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:55.47264066 +0000 UTC m=+5796.263357818" watchObservedRunningTime="2025-11-26 17:02:55.478688301 +0000 UTC m=+5796.269405449" Nov 26 17:02:55 crc 
kubenswrapper[5010]: I1126 17:02:55.569441 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-ovndb-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.569617 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-85dgf\" (UniqueName: \"kubernetes.io/projected/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-kube-api-access-85dgf\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.569659 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-public-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.569728 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-config\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.569837 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-httpd-config\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.569971 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-internal-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.570046 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-combined-ca-bundle\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.671969 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-internal-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.672022 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-combined-ca-bundle\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 
26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.672083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-ovndb-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.672149 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-85dgf\" (UniqueName: \"kubernetes.io/projected/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-kube-api-access-85dgf\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.672172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-public-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.672195 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-config\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.672229 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-httpd-config\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.679075 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-combined-ca-bundle\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.680065 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-ovndb-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.680113 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-httpd-config\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.683423 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-config\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:55 crc kubenswrapper[5010]: I1126 17:02:55.691936 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-85dgf\" (UniqueName: 
\"kubernetes.io/projected/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-kube-api-access-85dgf\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:56 crc kubenswrapper[5010]: I1126 17:02:56.618285 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 26 17:02:56 crc kubenswrapper[5010]: I1126 17:02:56.628382 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-public-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:56 crc kubenswrapper[5010]: E1126 17:02:56.673137 5010 secret.go:188] Couldn't get secret openstack/cert-neutron-internal-svc: failed to sync secret cache: timed out waiting for the condition Nov 26 17:02:56 crc kubenswrapper[5010]: E1126 17:02:56.673217 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-internal-tls-certs podName:6cc94ff2-fe26-443d-bdbc-c376d3aa59ba nodeName:}" failed. No retries permitted until 2025-11-26 17:02:57.173198956 +0000 UTC m=+5797.963916104 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "internal-tls-certs" (UniqueName: "kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-internal-tls-certs") pod "neutron-86bf4f9bd7-vb726" (UID: "6cc94ff2-fe26-443d-bdbc-c376d3aa59ba") : failed to sync secret cache: timed out waiting for the condition Nov 26 17:02:56 crc kubenswrapper[5010]: I1126 17:02:56.752329 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 26 17:02:57 crc kubenswrapper[5010]: I1126 17:02:57.199472 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-internal-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:57 crc kubenswrapper[5010]: I1126 17:02:57.205664 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/6cc94ff2-fe26-443d-bdbc-c376d3aa59ba-internal-tls-certs\") pod \"neutron-86bf4f9bd7-vb726\" (UID: \"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba\") " pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:57 crc kubenswrapper[5010]: I1126 17:02:57.279245 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:57 crc kubenswrapper[5010]: I1126 17:02:57.841504 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86bf4f9bd7-vb726"] Nov 26 17:02:57 crc kubenswrapper[5010]: W1126 17:02:57.853119 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6cc94ff2_fe26_443d_bdbc_c376d3aa59ba.slice/crio-0899d6c44d35d9c69e9214d81e77cca7661d1cde6ca5b181d96888239a68a67d WatchSource:0}: Error finding container 0899d6c44d35d9c69e9214d81e77cca7661d1cde6ca5b181d96888239a68a67d: Status 404 returned error can't find the container with id 0899d6c44d35d9c69e9214d81e77cca7661d1cde6ca5b181d96888239a68a67d Nov 26 17:02:58 crc kubenswrapper[5010]: I1126 17:02:58.480984 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bf4f9bd7-vb726" event={"ID":"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba","Type":"ContainerStarted","Data":"bf72bd02561e3bcfca35a208cf400a5ea1cf0a9bbe02c36daee4c9a816467290"} Nov 26 17:02:58 crc kubenswrapper[5010]: I1126 17:02:58.481305 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bf4f9bd7-vb726" event={"ID":"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba","Type":"ContainerStarted","Data":"45fbcaf10c8df02c9a1b8f94e58dec985d5a6ba075026a491fe8ccd5f8c5e188"} Nov 26 17:02:58 crc kubenswrapper[5010]: I1126 17:02:58.481315 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86bf4f9bd7-vb726" event={"ID":"6cc94ff2-fe26-443d-bdbc-c376d3aa59ba","Type":"ContainerStarted","Data":"0899d6c44d35d9c69e9214d81e77cca7661d1cde6ca5b181d96888239a68a67d"} Nov 26 17:02:58 crc kubenswrapper[5010]: I1126 17:02:58.482404 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:02:58 crc kubenswrapper[5010]: I1126 17:02:58.510091 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-86bf4f9bd7-vb726" podStartSLOduration=3.510067766 podStartE2EDuration="3.510067766s" podCreationTimestamp="2025-11-26 17:02:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:02:58.508890787 +0000 UTC m=+5799.299607995" watchObservedRunningTime="2025-11-26 17:02:58.510067766 +0000 UTC m=+5799.300784924" Nov 26 17:02:58 crc kubenswrapper[5010]: I1126 17:02:58.891577 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:02:58 crc kubenswrapper[5010]: E1126 17:02:58.891910 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.001080 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.111458 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7546845d6c-qp2tb"] Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.111740 5010 kuberuntime_container.go:808] 
"Killing container with a grace period" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerName="dnsmasq-dns" containerID="cri-o://4de2c5af96140abdae15985b70ab60fb8cc59491adbf8421572de0a86807fc50" gracePeriod=10 Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.556642 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" event={"ID":"2489d4d4-6f5b-466c-9dda-b253dfc9912b","Type":"ContainerDied","Data":"4de2c5af96140abdae15985b70ab60fb8cc59491adbf8421572de0a86807fc50"} Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.556988 5010 generic.go:334] "Generic (PLEG): container finished" podID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerID="4de2c5af96140abdae15985b70ab60fb8cc59491adbf8421572de0a86807fc50" exitCode=0 Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.557030 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" event={"ID":"2489d4d4-6f5b-466c-9dda-b253dfc9912b","Type":"ContainerDied","Data":"a1197758f4f88880de06842168cf8064b3930d1117c5b4290b956246f145909a"} Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.557051 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1197758f4f88880de06842168cf8064b3930d1117c5b4290b956246f145909a" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.594496 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.680023 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-nb\") pod \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.680084 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfgtn\" (UniqueName: \"kubernetes.io/projected/2489d4d4-6f5b-466c-9dda-b253dfc9912b-kube-api-access-pfgtn\") pod \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.680194 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-dns-svc\") pod \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.680243 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-config\") pod \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.680464 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-sb\") pod \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\" (UID: \"2489d4d4-6f5b-466c-9dda-b253dfc9912b\") " Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.691468 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2489d4d4-6f5b-466c-9dda-b253dfc9912b-kube-api-access-pfgtn" (OuterVolumeSpecName: 
"kube-api-access-pfgtn") pod "2489d4d4-6f5b-466c-9dda-b253dfc9912b" (UID: "2489d4d4-6f5b-466c-9dda-b253dfc9912b"). InnerVolumeSpecName "kube-api-access-pfgtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.729578 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-config" (OuterVolumeSpecName: "config") pod "2489d4d4-6f5b-466c-9dda-b253dfc9912b" (UID: "2489d4d4-6f5b-466c-9dda-b253dfc9912b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.729769 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2489d4d4-6f5b-466c-9dda-b253dfc9912b" (UID: "2489d4d4-6f5b-466c-9dda-b253dfc9912b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.745348 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2489d4d4-6f5b-466c-9dda-b253dfc9912b" (UID: "2489d4d4-6f5b-466c-9dda-b253dfc9912b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.762215 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2489d4d4-6f5b-466c-9dda-b253dfc9912b" (UID: "2489d4d4-6f5b-466c-9dda-b253dfc9912b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.782588 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.782615 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfgtn\" (UniqueName: \"kubernetes.io/projected/2489d4d4-6f5b-466c-9dda-b253dfc9912b-kube-api-access-pfgtn\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.782624 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.782634 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:03 crc kubenswrapper[5010]: I1126 17:03:03.782643 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2489d4d4-6f5b-466c-9dda-b253dfc9912b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:04 crc kubenswrapper[5010]: I1126 17:03:04.567015 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7546845d6c-qp2tb" Nov 26 17:03:04 crc kubenswrapper[5010]: I1126 17:03:04.596180 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7546845d6c-qp2tb"] Nov 26 17:03:04 crc kubenswrapper[5010]: I1126 17:03:04.610548 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7546845d6c-qp2tb"] Nov 26 17:03:05 crc kubenswrapper[5010]: I1126 17:03:05.907338 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" path="/var/lib/kubelet/pods/2489d4d4-6f5b-466c-9dda-b253dfc9912b/volumes" Nov 26 17:03:10 crc kubenswrapper[5010]: I1126 17:03:10.892829 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:03:10 crc kubenswrapper[5010]: E1126 17:03:10.893642 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:03:22 crc kubenswrapper[5010]: I1126 17:03:22.891566 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:03:22 crc kubenswrapper[5010]: E1126 17:03:22.892437 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:03:23 crc kubenswrapper[5010]: I1126 17:03:23.215252 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:03:27 crc kubenswrapper[5010]: I1126 17:03:27.291746 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-86bf4f9bd7-vb726" Nov 26 17:03:27 crc kubenswrapper[5010]: I1126 17:03:27.403260 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6dff657544-xqtrb"] Nov 26 17:03:27 crc kubenswrapper[5010]: I1126 17:03:27.403531 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6dff657544-xqtrb" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-api" containerID="cri-o://9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be" gracePeriod=30 Nov 26 17:03:27 crc kubenswrapper[5010]: I1126 17:03:27.404120 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6dff657544-xqtrb" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-httpd" containerID="cri-o://c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823" gracePeriod=30 Nov 26 17:03:27 crc kubenswrapper[5010]: I1126 17:03:27.785089 5010 generic.go:334] "Generic (PLEG): container finished" podID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerID="c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823" exitCode=0 Nov 26 17:03:27 crc kubenswrapper[5010]: I1126 17:03:27.785165 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6dff657544-xqtrb" event={"ID":"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908","Type":"ContainerDied","Data":"c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823"} Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.647649 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.674490 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-config\") pod \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.674545 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-combined-ca-bundle\") pod \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.674591 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc4qr\" (UniqueName: \"kubernetes.io/projected/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-kube-api-access-sc4qr\") pod \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.674662 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-httpd-config\") pod \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.674696 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-ovndb-tls-certs\") pod \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\" (UID: \"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908\") " Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.680214 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-kube-api-access-sc4qr" (OuterVolumeSpecName: "kube-api-access-sc4qr") pod "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" (UID: "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908"). InnerVolumeSpecName "kube-api-access-sc4qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.688894 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" (UID: "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.721180 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-config" (OuterVolumeSpecName: "config") pod "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" (UID: "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.731916 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" (UID: "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.745714 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" (UID: "b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.776075 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.776139 5010 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.776154 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.776166 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.776178 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc4qr\" (UniqueName: \"kubernetes.io/projected/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908-kube-api-access-sc4qr\") on node \"crc\" DevicePath \"\"" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.808945 5010 generic.go:334] "Generic (PLEG): container finished" podID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerID="9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be" exitCode=0 Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.809001 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6dff657544-xqtrb" event={"ID":"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908","Type":"ContainerDied","Data":"9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be"} Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.809019 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6dff657544-xqtrb" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.809034 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6dff657544-xqtrb" event={"ID":"b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908","Type":"ContainerDied","Data":"d0074f6dae11ae5cd3d3e3df2c6f8be58880300744eeed2a333ffa84e5d13c4f"} Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.809058 5010 scope.go:117] "RemoveContainer" containerID="c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.834325 5010 scope.go:117] "RemoveContainer" containerID="9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.850240 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6dff657544-xqtrb"] Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.853490 5010 scope.go:117] "RemoveContainer" containerID="c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823" Nov 26 17:03:29 crc kubenswrapper[5010]: E1126 17:03:29.854075 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823\": container with ID starting with c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823 not found: ID does not exist" containerID="c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.854247 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823"} err="failed to get container status \"c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823\": rpc error: code = NotFound desc = could not find container \"c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823\": container with ID starting with c2242259a6d3fd36e285a8c68cbbebee0a9a0a022d435f7748e4968e2a702823 not found: ID does not exist" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.854379 5010 scope.go:117] "RemoveContainer" containerID="9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be" Nov 26 17:03:29 crc kubenswrapper[5010]: E1126 17:03:29.854759 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be\": container with ID starting with 9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be not found: ID does not exist" containerID="9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.854897 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be"} err="failed to get container status \"9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be\": rpc error: code = NotFound desc = could not find container \"9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be\": container with ID starting with 9ab3e87f103cd7bbdd4b438cba69dc061389c79c37521291ffe421745516f1be not found: ID does not exist" Nov 26 17:03:29 crc kubenswrapper[5010]: I1126 17:03:29.860758 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6dff657544-xqtrb"] Nov 26 17:03:29 crc 
kubenswrapper[5010]: I1126 17:03:29.906756 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" path="/var/lib/kubelet/pods/b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908/volumes" Nov 26 17:03:36 crc kubenswrapper[5010]: I1126 17:03:36.891735 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:03:36 crc kubenswrapper[5010]: E1126 17:03:36.892935 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:03:48 crc kubenswrapper[5010]: I1126 17:03:48.891765 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:03:48 crc kubenswrapper[5010]: E1126 17:03:48.892592 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.101405 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-brrhc"] Nov 26 17:03:55 crc kubenswrapper[5010]: E1126 17:03:55.102248 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerName="dnsmasq-dns" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102265 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerName="dnsmasq-dns" Nov 26 17:03:55 crc kubenswrapper[5010]: E1126 17:03:55.102283 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerName="init" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102291 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerName="init" Nov 26 17:03:55 crc kubenswrapper[5010]: E1126 17:03:55.102317 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-api" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102325 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-api" Nov 26 17:03:55 crc kubenswrapper[5010]: E1126 17:03:55.102342 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-httpd" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102349 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-httpd" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102562 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2489d4d4-6f5b-466c-9dda-b253dfc9912b" containerName="dnsmasq-dns" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102576 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-httpd" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.102594 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5ef121a-b9c3-41a5-95ce-4c9ff5a3d908" containerName="neutron-api" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.103296 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.104883 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.105086 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.105162 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-27fzn" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.105155 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.105161 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.126559 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-brrhc"] Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.128497 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-combined-ca-bundle\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.128561 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a3b12744-4bf5-44b6-9584-2e2edf84b267-etc-swift\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.128593 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-dispersionconf\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.128635 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dtl56\" (UniqueName: \"kubernetes.io/projected/a3b12744-4bf5-44b6-9584-2e2edf84b267-kube-api-access-dtl56\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.128710 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-ring-data-devices\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 
crc kubenswrapper[5010]: I1126 17:03:55.131424 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-scripts\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.131457 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-swiftconf\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.147376 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6cd9fddc67-q99jj"] Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.149343 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.187785 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9fddc67-q99jj"] Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236550 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v25c\" (UniqueName: \"kubernetes.io/projected/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-kube-api-access-7v25c\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236622 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a3b12744-4bf5-44b6-9584-2e2edf84b267-etc-swift\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236663 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-dispersionconf\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236689 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236762 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dtl56\" (UniqueName: \"kubernetes.io/projected/a3b12744-4bf5-44b6-9584-2e2edf84b267-kube-api-access-dtl56\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236788 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-config\") pod 
\"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236869 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236900 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-ring-data-devices\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236927 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-scripts\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.236950 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-swiftconf\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.237023 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-dns-svc\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.237061 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-combined-ca-bundle\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.238709 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a3b12744-4bf5-44b6-9584-2e2edf84b267-etc-swift\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.239862 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-scripts\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.240387 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-ring-data-devices\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " 
pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.242961 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-dispersionconf\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.244367 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-combined-ca-bundle\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.245080 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-swiftconf\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.265256 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dtl56\" (UniqueName: \"kubernetes.io/projected/a3b12744-4bf5-44b6-9584-2e2edf84b267-kube-api-access-dtl56\") pod \"swift-ring-rebalance-brrhc\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.338561 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.338919 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-dns-svc\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.338979 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v25c\" (UniqueName: \"kubernetes.io/projected/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-kube-api-access-7v25c\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.339026 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.339071 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-config\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.339788 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-dns-svc\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.339933 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-config\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.340096 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.340095 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-nb\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.363351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7v25c\" (UniqueName: \"kubernetes.io/projected/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-kube-api-access-7v25c\") pod \"dnsmasq-dns-6cd9fddc67-q99jj\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.428930 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.470394 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:55 crc kubenswrapper[5010]: I1126 17:03:55.945328 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-brrhc"] Nov 26 17:03:56 crc kubenswrapper[5010]: I1126 17:03:56.048413 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6cd9fddc67-q99jj"] Nov 26 17:03:56 crc kubenswrapper[5010]: W1126 17:03:56.056182 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9fbe6bde_b8d7_4c50_b149_b22bb8406db7.slice/crio-a9944eeb6226e9897a009fee0946965fe55cdb18695c0773184aa2658503431d WatchSource:0}: Error finding container a9944eeb6226e9897a009fee0946965fe55cdb18695c0773184aa2658503431d: Status 404 returned error can't find the container with id a9944eeb6226e9897a009fee0946965fe55cdb18695c0773184aa2658503431d Nov 26 17:03:56 crc kubenswrapper[5010]: I1126 17:03:56.087260 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-brrhc" event={"ID":"a3b12744-4bf5-44b6-9584-2e2edf84b267","Type":"ContainerStarted","Data":"b0f8bce24d8567411c408cef1857c9bda109d50d954f0fd1f83ce0d2e9315d0c"} Nov 26 17:03:56 crc kubenswrapper[5010]: I1126 17:03:56.089810 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" event={"ID":"9fbe6bde-b8d7-4c50-b149-b22bb8406db7","Type":"ContainerStarted","Data":"a9944eeb6226e9897a009fee0946965fe55cdb18695c0773184aa2658503431d"} Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.047097 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-fb965d994-4tnxk"] Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.051216 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.055536 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.060822 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-fb965d994-4tnxk"] Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.071067 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-etc-swift\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.071293 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-log-httpd\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.071472 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-run-httpd\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.071631 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4lgpt\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-kube-api-access-4lgpt\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.071818 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-config-data\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.071933 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-combined-ca-bundle\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.105055 5010 generic.go:334] "Generic (PLEG): container finished" podID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerID="fd0e2dd75a5a9f2b7d32d1aebdf972cd205cab5a93b38edb96cd52bd55b2ebba" exitCode=0 Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.105214 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" event={"ID":"9fbe6bde-b8d7-4c50-b149-b22bb8406db7","Type":"ContainerDied","Data":"fd0e2dd75a5a9f2b7d32d1aebdf972cd205cab5a93b38edb96cd52bd55b2ebba"} Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.108702 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-brrhc" 
event={"ID":"a3b12744-4bf5-44b6-9584-2e2edf84b267","Type":"ContainerStarted","Data":"9b3ad29ea8126e373180349fcd939c391fd686f4b25fb59ad64cfc3a3273fede"} Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.174696 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-log-httpd\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.180161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-run-httpd\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.180472 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4lgpt\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-kube-api-access-4lgpt\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.178027 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-log-httpd\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.180625 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-run-httpd\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.183241 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-config-data\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.185881 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-combined-ca-bundle\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.186700 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-etc-swift\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.189231 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-config-data\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " 
pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.193623 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-combined-ca-bundle\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.196919 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-etc-swift\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.209098 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4lgpt\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-kube-api-access-4lgpt\") pod \"swift-proxy-fb965d994-4tnxk\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:57 crc kubenswrapper[5010]: I1126 17:03:57.371037 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:58 crc kubenswrapper[5010]: I1126 17:03:58.059602 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-brrhc" podStartSLOduration=3.059579365 podStartE2EDuration="3.059579365s" podCreationTimestamp="2025-11-26 17:03:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:03:57.148501763 +0000 UTC m=+5857.939218911" watchObservedRunningTime="2025-11-26 17:03:58.059579365 +0000 UTC m=+5858.850296513" Nov 26 17:03:58 crc kubenswrapper[5010]: W1126 17:03:58.066932 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fc27ebb_14a5_4996_8110_e9aaee047c92.slice/crio-c97ac17bf531e9aefc21b190d17a2a3b5df92f938109090207160d8c363047ab WatchSource:0}: Error finding container c97ac17bf531e9aefc21b190d17a2a3b5df92f938109090207160d8c363047ab: Status 404 returned error can't find the container with id c97ac17bf531e9aefc21b190d17a2a3b5df92f938109090207160d8c363047ab Nov 26 17:03:58 crc kubenswrapper[5010]: I1126 17:03:58.069729 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-fb965d994-4tnxk"] Nov 26 17:03:58 crc kubenswrapper[5010]: I1126 17:03:58.135667 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" event={"ID":"9fbe6bde-b8d7-4c50-b149-b22bb8406db7","Type":"ContainerStarted","Data":"036bb9672543db02139a86d6101478dafac1fd93ce3d6598e5d61c794af593f7"} Nov 26 17:03:58 crc kubenswrapper[5010]: I1126 17:03:58.135798 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:03:58 crc kubenswrapper[5010]: I1126 17:03:58.137875 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-fb965d994-4tnxk" event={"ID":"4fc27ebb-14a5-4996-8110-e9aaee047c92","Type":"ContainerStarted","Data":"c97ac17bf531e9aefc21b190d17a2a3b5df92f938109090207160d8c363047ab"} Nov 26 17:03:58 crc kubenswrapper[5010]: I1126 17:03:58.171303 5010 pod_startup_latency_tracker.go:104] 
"Observed pod startup duration" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" podStartSLOduration=3.171277734 podStartE2EDuration="3.171277734s" podCreationTimestamp="2025-11-26 17:03:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:03:58.15503897 +0000 UTC m=+5858.945756128" watchObservedRunningTime="2025-11-26 17:03:58.171277734 +0000 UTC m=+5858.961994882" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.148478 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-fb965d994-4tnxk" event={"ID":"4fc27ebb-14a5-4996-8110-e9aaee047c92","Type":"ContainerStarted","Data":"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9"} Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.148860 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-fb965d994-4tnxk" event={"ID":"4fc27ebb-14a5-4996-8110-e9aaee047c92","Type":"ContainerStarted","Data":"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2"} Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.149114 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.149134 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.174082 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-fb965d994-4tnxk" podStartSLOduration=2.1740613189999998 podStartE2EDuration="2.174061319s" podCreationTimestamp="2025-11-26 17:03:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:03:59.166986873 +0000 UTC m=+5859.957704051" watchObservedRunningTime="2025-11-26 17:03:59.174061319 +0000 UTC m=+5859.964778467" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.543711 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6d5d78b986-llp52"] Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.550147 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.552406 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.552514 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.563508 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6d5d78b986-llp52"] Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.735986 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28rpn\" (UniqueName: \"kubernetes.io/projected/c29d3e67-7707-42ab-b03f-d2240fef0672-kube-api-access-28rpn\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736043 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c29d3e67-7707-42ab-b03f-d2240fef0672-run-httpd\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736076 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-internal-tls-certs\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736101 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-combined-ca-bundle\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736310 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c29d3e67-7707-42ab-b03f-d2240fef0672-etc-swift\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736526 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-public-tls-certs\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736788 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c29d3e67-7707-42ab-b03f-d2240fef0672-log-httpd\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.736906 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-config-data\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842412 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-internal-tls-certs\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842480 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c29d3e67-7707-42ab-b03f-d2240fef0672-etc-swift\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842505 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-combined-ca-bundle\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842541 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-public-tls-certs\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842590 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c29d3e67-7707-42ab-b03f-d2240fef0672-log-httpd\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842626 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-config-data\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842702 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28rpn\" (UniqueName: \"kubernetes.io/projected/c29d3e67-7707-42ab-b03f-d2240fef0672-kube-api-access-28rpn\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.842781 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c29d3e67-7707-42ab-b03f-d2240fef0672-run-httpd\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.844036 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/c29d3e67-7707-42ab-b03f-d2240fef0672-log-httpd\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.844821 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c29d3e67-7707-42ab-b03f-d2240fef0672-run-httpd\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.850299 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-config-data\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.850425 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c29d3e67-7707-42ab-b03f-d2240fef0672-etc-swift\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.857518 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-internal-tls-certs\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.862321 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-public-tls-certs\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.870522 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28rpn\" (UniqueName: \"kubernetes.io/projected/c29d3e67-7707-42ab-b03f-d2240fef0672-kube-api-access-28rpn\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:03:59 crc kubenswrapper[5010]: I1126 17:03:59.875215 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c29d3e67-7707-42ab-b03f-d2240fef0672-combined-ca-bundle\") pod \"swift-proxy-6d5d78b986-llp52\" (UID: \"c29d3e67-7707-42ab-b03f-d2240fef0672\") " pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:04:00 crc kubenswrapper[5010]: I1126 17:04:00.167993 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:04:00 crc kubenswrapper[5010]: I1126 17:04:00.810637 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6d5d78b986-llp52"] Nov 26 17:04:01 crc kubenswrapper[5010]: I1126 17:04:01.168496 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d5d78b986-llp52" event={"ID":"c29d3e67-7707-42ab-b03f-d2240fef0672","Type":"ContainerStarted","Data":"5dff74165612d88a5902ae7b9280358fd27495560e7725bc9d051b5de8f3ed97"} Nov 26 17:04:01 crc kubenswrapper[5010]: I1126 17:04:01.168875 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d5d78b986-llp52" event={"ID":"c29d3e67-7707-42ab-b03f-d2240fef0672","Type":"ContainerStarted","Data":"636d82e446d396f5e07d265aa46dbebf60e998df8e5668072b5876cc084e77ff"} Nov 26 17:04:01 crc kubenswrapper[5010]: I1126 17:04:01.171094 5010 generic.go:334] "Generic (PLEG): container finished" podID="a3b12744-4bf5-44b6-9584-2e2edf84b267" containerID="9b3ad29ea8126e373180349fcd939c391fd686f4b25fb59ad64cfc3a3273fede" exitCode=0 Nov 26 17:04:01 crc kubenswrapper[5010]: I1126 17:04:01.171129 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-brrhc" event={"ID":"a3b12744-4bf5-44b6-9584-2e2edf84b267","Type":"ContainerDied","Data":"9b3ad29ea8126e373180349fcd939c391fd686f4b25fb59ad64cfc3a3273fede"} Nov 26 17:04:01 crc kubenswrapper[5010]: I1126 17:04:01.892630 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:04:01 crc kubenswrapper[5010]: E1126 17:04:01.893132 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.213387 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6d5d78b986-llp52" event={"ID":"c29d3e67-7707-42ab-b03f-d2240fef0672","Type":"ContainerStarted","Data":"68a748bd4da03edf395663848f7e94fb710d5b951e24ec49537d14b4bd28689e"} Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.220136 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.220192 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.243155 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6d5d78b986-llp52" podStartSLOduration=3.243132093 podStartE2EDuration="3.243132093s" podCreationTimestamp="2025-11-26 17:03:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:04:02.240951668 +0000 UTC m=+5863.031668846" watchObservedRunningTime="2025-11-26 17:04:02.243132093 +0000 UTC m=+5863.033849241" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.614183 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.713239 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-dispersionconf\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.713291 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-combined-ca-bundle\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.713398 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dtl56\" (UniqueName: \"kubernetes.io/projected/a3b12744-4bf5-44b6-9584-2e2edf84b267-kube-api-access-dtl56\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.713479 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a3b12744-4bf5-44b6-9584-2e2edf84b267-etc-swift\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.714450 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a3b12744-4bf5-44b6-9584-2e2edf84b267-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.719168 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a3b12744-4bf5-44b6-9584-2e2edf84b267-kube-api-access-dtl56" (OuterVolumeSpecName: "kube-api-access-dtl56") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "kube-api-access-dtl56". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.722460 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.740565 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.816045 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-swiftconf\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.816449 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-ring-data-devices\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.817069 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.816481 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-scripts\") pod \"a3b12744-4bf5-44b6-9584-2e2edf84b267\" (UID: \"a3b12744-4bf5-44b6-9584-2e2edf84b267\") " Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.818394 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dtl56\" (UniqueName: \"kubernetes.io/projected/a3b12744-4bf5-44b6-9584-2e2edf84b267-kube-api-access-dtl56\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.818428 5010 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/a3b12744-4bf5-44b6-9584-2e2edf84b267-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.818445 5010 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.818461 5010 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.818479 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.840701 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.852336 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-scripts" (OuterVolumeSpecName: "scripts") pod "a3b12744-4bf5-44b6-9584-2e2edf84b267" (UID: "a3b12744-4bf5-44b6-9584-2e2edf84b267"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.920565 5010 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/a3b12744-4bf5-44b6-9584-2e2edf84b267-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:02 crc kubenswrapper[5010]: I1126 17:04:02.920606 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a3b12744-4bf5-44b6-9584-2e2edf84b267-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:03 crc kubenswrapper[5010]: I1126 17:04:03.228065 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-brrhc" Nov 26 17:04:03 crc kubenswrapper[5010]: I1126 17:04:03.228228 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-brrhc" event={"ID":"a3b12744-4bf5-44b6-9584-2e2edf84b267","Type":"ContainerDied","Data":"b0f8bce24d8567411c408cef1857c9bda109d50d954f0fd1f83ce0d2e9315d0c"} Nov 26 17:04:03 crc kubenswrapper[5010]: I1126 17:04:03.229342 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0f8bce24d8567411c408cef1857c9bda109d50d954f0fd1f83ce0d2e9315d0c" Nov 26 17:04:05 crc kubenswrapper[5010]: I1126 17:04:05.472869 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:04:05 crc kubenswrapper[5010]: I1126 17:04:05.558088 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8f57cd8c-znnc4"] Nov 26 17:04:05 crc kubenswrapper[5010]: I1126 17:04:05.558366 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerName="dnsmasq-dns" containerID="cri-o://85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852" gracePeriod=10 Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.093971 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.178035 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-dns-svc\") pod \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.178086 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-config\") pod \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.178122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-nb\") pod \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.178194 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-sb\") pod \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.178368 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srt96\" (UniqueName: \"kubernetes.io/projected/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-kube-api-access-srt96\") pod \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\" (UID: \"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff\") " Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.198379 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-kube-api-access-srt96" (OuterVolumeSpecName: "kube-api-access-srt96") pod "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" (UID: "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff"). InnerVolumeSpecName "kube-api-access-srt96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.222529 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" (UID: "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.228754 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-config" (OuterVolumeSpecName: "config") pod "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" (UID: "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.231661 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" (UID: "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.247542 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" (UID: "6a20a6f4-b9da-45e2-8a6b-ac75e47897ff"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.254429 5010 generic.go:334] "Generic (PLEG): container finished" podID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerID="85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852" exitCode=0 Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.254469 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" event={"ID":"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff","Type":"ContainerDied","Data":"85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852"} Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.254499 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" event={"ID":"6a20a6f4-b9da-45e2-8a6b-ac75e47897ff","Type":"ContainerDied","Data":"9720876f96aa66af521f7ea4a9f8e0ee9cb3affd27935fd69a94ebf7140e9111"} Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.254516 5010 scope.go:117] "RemoveContainer" containerID="85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.255088 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6b8f57cd8c-znnc4" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.274547 5010 scope.go:117] "RemoveContainer" containerID="422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.280899 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srt96\" (UniqueName: \"kubernetes.io/projected/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-kube-api-access-srt96\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.280931 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.280940 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.280949 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.280958 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.293896 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6b8f57cd8c-znnc4"] Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.307814 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-6b8f57cd8c-znnc4"] Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.310357 5010 scope.go:117] "RemoveContainer" containerID="85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852" Nov 26 17:04:06 crc kubenswrapper[5010]: E1126 17:04:06.310852 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852\": container with ID starting with 85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852 not found: ID does not exist" containerID="85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.310983 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852"} err="failed to get container status \"85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852\": rpc error: code = NotFound desc = could not find container \"85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852\": container with ID starting with 85e9386c66d0bcabc03385fbe968956811c6ff5c4f34d643a48c2c5f1ef86852 not found: ID does not exist" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.311085 5010 scope.go:117] "RemoveContainer" containerID="422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71" Nov 26 17:04:06 crc kubenswrapper[5010]: E1126 17:04:06.311549 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71\": container with ID starting with 422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71 not found: ID does not exist" containerID="422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71" Nov 26 17:04:06 crc kubenswrapper[5010]: I1126 17:04:06.311589 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71"} err="failed to get container status \"422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71\": rpc error: code = NotFound desc = could not find container \"422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71\": container with ID starting with 422d2bd644f81ff000e41e78eda9410799baa2e9123c25bcc9edbf9a8ad93b71 not found: ID does not exist" Nov 26 17:04:07 crc kubenswrapper[5010]: I1126 17:04:07.405291 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:04:07 crc kubenswrapper[5010]: I1126 17:04:07.437030 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:04:07 crc kubenswrapper[5010]: I1126 17:04:07.901360 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" path="/var/lib/kubelet/pods/6a20a6f4-b9da-45e2-8a6b-ac75e47897ff/volumes" Nov 26 17:04:10 crc kubenswrapper[5010]: I1126 17:04:10.174436 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:04:10 crc kubenswrapper[5010]: I1126 17:04:10.174859 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6d5d78b986-llp52" Nov 26 17:04:10 crc kubenswrapper[5010]: I1126 
17:04:10.273611 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-fb965d994-4tnxk"] Nov 26 17:04:10 crc kubenswrapper[5010]: I1126 17:04:10.273897 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-fb965d994-4tnxk" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-httpd" containerID="cri-o://7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2" gracePeriod=30 Nov 26 17:04:10 crc kubenswrapper[5010]: I1126 17:04:10.274018 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-fb965d994-4tnxk" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-server" containerID="cri-o://48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9" gracePeriod=30 Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.217788 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.322938 5010 generic.go:334] "Generic (PLEG): container finished" podID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerID="48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9" exitCode=0 Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.322970 5010 generic.go:334] "Generic (PLEG): container finished" podID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerID="7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2" exitCode=0 Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.322999 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-fb965d994-4tnxk" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.322996 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-fb965d994-4tnxk" event={"ID":"4fc27ebb-14a5-4996-8110-e9aaee047c92","Type":"ContainerDied","Data":"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9"} Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.323059 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-fb965d994-4tnxk" event={"ID":"4fc27ebb-14a5-4996-8110-e9aaee047c92","Type":"ContainerDied","Data":"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2"} Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.323072 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-fb965d994-4tnxk" event={"ID":"4fc27ebb-14a5-4996-8110-e9aaee047c92","Type":"ContainerDied","Data":"c97ac17bf531e9aefc21b190d17a2a3b5df92f938109090207160d8c363047ab"} Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.323089 5010 scope.go:117] "RemoveContainer" containerID="48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.340478 5010 scope.go:117] "RemoveContainer" containerID="7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.358583 5010 scope.go:117] "RemoveContainer" containerID="48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9" Nov 26 17:04:11 crc kubenswrapper[5010]: E1126 17:04:11.358976 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9\": container with ID starting with 48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9 not 
found: ID does not exist" containerID="48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.359003 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9"} err="failed to get container status \"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9\": rpc error: code = NotFound desc = could not find container \"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9\": container with ID starting with 48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9 not found: ID does not exist" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.359023 5010 scope.go:117] "RemoveContainer" containerID="7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2" Nov 26 17:04:11 crc kubenswrapper[5010]: E1126 17:04:11.359345 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2\": container with ID starting with 7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2 not found: ID does not exist" containerID="7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.359399 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2"} err="failed to get container status \"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2\": rpc error: code = NotFound desc = could not find container \"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2\": container with ID starting with 7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2 not found: ID does not exist" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.359431 5010 scope.go:117] "RemoveContainer" containerID="48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.360739 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9"} err="failed to get container status \"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9\": rpc error: code = NotFound desc = could not find container \"48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9\": container with ID starting with 48551a632bb2f96d60f67393f490d7636e4639a7af050f3f91a70993cbf423e9 not found: ID does not exist" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.360761 5010 scope.go:117] "RemoveContainer" containerID="7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.360983 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2"} err="failed to get container status \"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2\": rpc error: code = NotFound desc = could not find container \"7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2\": container with ID starting with 7927d10f059d8d591865de90fb6d718e39c5fee234a26d5189e9dd4d421f4db2 not found: ID does not exist" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.378408 5010 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-combined-ca-bundle\") pod \"4fc27ebb-14a5-4996-8110-e9aaee047c92\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.378498 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4lgpt\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-kube-api-access-4lgpt\") pod \"4fc27ebb-14a5-4996-8110-e9aaee047c92\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.378524 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-log-httpd\") pod \"4fc27ebb-14a5-4996-8110-e9aaee047c92\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.378548 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-etc-swift\") pod \"4fc27ebb-14a5-4996-8110-e9aaee047c92\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.378724 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-config-data\") pod \"4fc27ebb-14a5-4996-8110-e9aaee047c92\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.378749 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-run-httpd\") pod \"4fc27ebb-14a5-4996-8110-e9aaee047c92\" (UID: \"4fc27ebb-14a5-4996-8110-e9aaee047c92\") " Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.379162 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4fc27ebb-14a5-4996-8110-e9aaee047c92" (UID: "4fc27ebb-14a5-4996-8110-e9aaee047c92"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.380343 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4fc27ebb-14a5-4996-8110-e9aaee047c92" (UID: "4fc27ebb-14a5-4996-8110-e9aaee047c92"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.394001 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "4fc27ebb-14a5-4996-8110-e9aaee047c92" (UID: "4fc27ebb-14a5-4996-8110-e9aaee047c92"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.399099 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-kube-api-access-4lgpt" (OuterVolumeSpecName: "kube-api-access-4lgpt") pod "4fc27ebb-14a5-4996-8110-e9aaee047c92" (UID: "4fc27ebb-14a5-4996-8110-e9aaee047c92"). InnerVolumeSpecName "kube-api-access-4lgpt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.437671 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4fc27ebb-14a5-4996-8110-e9aaee047c92" (UID: "4fc27ebb-14a5-4996-8110-e9aaee047c92"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.449867 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-config-data" (OuterVolumeSpecName: "config-data") pod "4fc27ebb-14a5-4996-8110-e9aaee047c92" (UID: "4fc27ebb-14a5-4996-8110-e9aaee047c92"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.480959 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.480992 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.481003 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4fc27ebb-14a5-4996-8110-e9aaee047c92-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.481012 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4lgpt\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-kube-api-access-4lgpt\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.481022 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4fc27ebb-14a5-4996-8110-e9aaee047c92-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.481029 5010 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/4fc27ebb-14a5-4996-8110-e9aaee047c92-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.654399 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-fb965d994-4tnxk"] Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.662656 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-fb965d994-4tnxk"] Nov 26 17:04:11 crc kubenswrapper[5010]: I1126 17:04:11.906980 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" 
path="/var/lib/kubelet/pods/4fc27ebb-14a5-4996-8110-e9aaee047c92/volumes" Nov 26 17:04:14 crc kubenswrapper[5010]: I1126 17:04:14.891803 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:04:14 crc kubenswrapper[5010]: E1126 17:04:14.892794 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.512794 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-xl79j"] Nov 26 17:04:16 crc kubenswrapper[5010]: E1126 17:04:16.513338 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerName="init" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513349 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerName="init" Nov 26 17:04:16 crc kubenswrapper[5010]: E1126 17:04:16.513371 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-server" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513377 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-server" Nov 26 17:04:16 crc kubenswrapper[5010]: E1126 17:04:16.513393 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerName="dnsmasq-dns" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513399 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerName="dnsmasq-dns" Nov 26 17:04:16 crc kubenswrapper[5010]: E1126 17:04:16.513411 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a3b12744-4bf5-44b6-9584-2e2edf84b267" containerName="swift-ring-rebalance" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513416 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a3b12744-4bf5-44b6-9584-2e2edf84b267" containerName="swift-ring-rebalance" Nov 26 17:04:16 crc kubenswrapper[5010]: E1126 17:04:16.513438 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-httpd" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513444 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-httpd" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513591 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a3b12744-4bf5-44b6-9584-2e2edf84b267" containerName="swift-ring-rebalance" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513600 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a20a6f4-b9da-45e2-8a6b-ac75e47897ff" containerName="dnsmasq-dns" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513614 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-httpd" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.513628 5010 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="4fc27ebb-14a5-4996-8110-e9aaee047c92" containerName="proxy-server" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.514174 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.524765 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xl79j"] Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.625656 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-b021-account-create-update-tz28t"] Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.626977 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.629882 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.642852 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b021-account-create-update-tz28t"] Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.675017 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f499269-4475-44ec-8b84-4979a96f2412-operator-scripts\") pod \"cinder-db-create-xl79j\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.675197 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cttvp\" (UniqueName: \"kubernetes.io/projected/1f499269-4475-44ec-8b84-4979a96f2412-kube-api-access-cttvp\") pod \"cinder-db-create-xl79j\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.776815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/14d26833-10fc-4fa3-9dfd-a0497e5dc238-operator-scripts\") pod \"cinder-b021-account-create-update-tz28t\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.776900 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cttvp\" (UniqueName: \"kubernetes.io/projected/1f499269-4475-44ec-8b84-4979a96f2412-kube-api-access-cttvp\") pod \"cinder-db-create-xl79j\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.777071 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpwtw\" (UniqueName: \"kubernetes.io/projected/14d26833-10fc-4fa3-9dfd-a0497e5dc238-kube-api-access-lpwtw\") pod \"cinder-b021-account-create-update-tz28t\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.777106 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f499269-4475-44ec-8b84-4979a96f2412-operator-scripts\") pod \"cinder-db-create-xl79j\" (UID: 
\"1f499269-4475-44ec-8b84-4979a96f2412\") " pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.777983 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f499269-4475-44ec-8b84-4979a96f2412-operator-scripts\") pod \"cinder-db-create-xl79j\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.794320 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cttvp\" (UniqueName: \"kubernetes.io/projected/1f499269-4475-44ec-8b84-4979a96f2412-kube-api-access-cttvp\") pod \"cinder-db-create-xl79j\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.833375 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.878888 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpwtw\" (UniqueName: \"kubernetes.io/projected/14d26833-10fc-4fa3-9dfd-a0497e5dc238-kube-api-access-lpwtw\") pod \"cinder-b021-account-create-update-tz28t\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.878969 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/14d26833-10fc-4fa3-9dfd-a0497e5dc238-operator-scripts\") pod \"cinder-b021-account-create-update-tz28t\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.880293 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/14d26833-10fc-4fa3-9dfd-a0497e5dc238-operator-scripts\") pod \"cinder-b021-account-create-update-tz28t\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.907863 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpwtw\" (UniqueName: \"kubernetes.io/projected/14d26833-10fc-4fa3-9dfd-a0497e5dc238-kube-api-access-lpwtw\") pod \"cinder-b021-account-create-update-tz28t\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:16 crc kubenswrapper[5010]: I1126 17:04:16.948573 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:17 crc kubenswrapper[5010]: I1126 17:04:17.331523 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-xl79j"] Nov 26 17:04:17 crc kubenswrapper[5010]: W1126 17:04:17.336256 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1f499269_4475_44ec_8b84_4979a96f2412.slice/crio-16334d2a0ede3b09ae857748dd9a40a9d33d28b5087964dd99a1959f5e6e1de7 WatchSource:0}: Error finding container 16334d2a0ede3b09ae857748dd9a40a9d33d28b5087964dd99a1959f5e6e1de7: Status 404 returned error can't find the container with id 16334d2a0ede3b09ae857748dd9a40a9d33d28b5087964dd99a1959f5e6e1de7 Nov 26 17:04:17 crc kubenswrapper[5010]: I1126 17:04:17.373135 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xl79j" event={"ID":"1f499269-4475-44ec-8b84-4979a96f2412","Type":"ContainerStarted","Data":"16334d2a0ede3b09ae857748dd9a40a9d33d28b5087964dd99a1959f5e6e1de7"} Nov 26 17:04:17 crc kubenswrapper[5010]: I1126 17:04:17.446305 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b021-account-create-update-tz28t"] Nov 26 17:04:17 crc kubenswrapper[5010]: W1126 17:04:17.450651 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14d26833_10fc_4fa3_9dfd_a0497e5dc238.slice/crio-cb4cd68b126ca1a64218019e477843e18f0a6afc9096c7444fd228b43556281c WatchSource:0}: Error finding container cb4cd68b126ca1a64218019e477843e18f0a6afc9096c7444fd228b43556281c: Status 404 returned error can't find the container with id cb4cd68b126ca1a64218019e477843e18f0a6afc9096c7444fd228b43556281c Nov 26 17:04:18 crc kubenswrapper[5010]: I1126 17:04:18.382029 5010 generic.go:334] "Generic (PLEG): container finished" podID="1f499269-4475-44ec-8b84-4979a96f2412" containerID="da40ef4ab582018cb44d4316bc04185e043320117689038a8cca8834c267a79d" exitCode=0 Nov 26 17:04:18 crc kubenswrapper[5010]: I1126 17:04:18.382105 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xl79j" event={"ID":"1f499269-4475-44ec-8b84-4979a96f2412","Type":"ContainerDied","Data":"da40ef4ab582018cb44d4316bc04185e043320117689038a8cca8834c267a79d"} Nov 26 17:04:18 crc kubenswrapper[5010]: I1126 17:04:18.384003 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b021-account-create-update-tz28t" event={"ID":"14d26833-10fc-4fa3-9dfd-a0497e5dc238","Type":"ContainerStarted","Data":"7a1f56a419e244d5d029b5a003119c30a5ba5e4d78f3b4fd489fbb3ef38e92c8"} Nov 26 17:04:18 crc kubenswrapper[5010]: I1126 17:04:18.384147 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b021-account-create-update-tz28t" event={"ID":"14d26833-10fc-4fa3-9dfd-a0497e5dc238","Type":"ContainerStarted","Data":"cb4cd68b126ca1a64218019e477843e18f0a6afc9096c7444fd228b43556281c"} Nov 26 17:04:18 crc kubenswrapper[5010]: I1126 17:04:18.421386 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-b021-account-create-update-tz28t" podStartSLOduration=2.421365229 podStartE2EDuration="2.421365229s" podCreationTimestamp="2025-11-26 17:04:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:04:18.416603851 +0000 UTC m=+5879.207321029" watchObservedRunningTime="2025-11-26 
17:04:18.421365229 +0000 UTC m=+5879.212082377" Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.395882 5010 generic.go:334] "Generic (PLEG): container finished" podID="14d26833-10fc-4fa3-9dfd-a0497e5dc238" containerID="7a1f56a419e244d5d029b5a003119c30a5ba5e4d78f3b4fd489fbb3ef38e92c8" exitCode=0 Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.395939 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b021-account-create-update-tz28t" event={"ID":"14d26833-10fc-4fa3-9dfd-a0497e5dc238","Type":"ContainerDied","Data":"7a1f56a419e244d5d029b5a003119c30a5ba5e4d78f3b4fd489fbb3ef38e92c8"} Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.786259 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.954842 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f499269-4475-44ec-8b84-4979a96f2412-operator-scripts\") pod \"1f499269-4475-44ec-8b84-4979a96f2412\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.954964 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cttvp\" (UniqueName: \"kubernetes.io/projected/1f499269-4475-44ec-8b84-4979a96f2412-kube-api-access-cttvp\") pod \"1f499269-4475-44ec-8b84-4979a96f2412\" (UID: \"1f499269-4475-44ec-8b84-4979a96f2412\") " Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.956296 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1f499269-4475-44ec-8b84-4979a96f2412-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1f499269-4475-44ec-8b84-4979a96f2412" (UID: "1f499269-4475-44ec-8b84-4979a96f2412"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:19 crc kubenswrapper[5010]: I1126 17:04:19.978308 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f499269-4475-44ec-8b84-4979a96f2412-kube-api-access-cttvp" (OuterVolumeSpecName: "kube-api-access-cttvp") pod "1f499269-4475-44ec-8b84-4979a96f2412" (UID: "1f499269-4475-44ec-8b84-4979a96f2412"). InnerVolumeSpecName "kube-api-access-cttvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.057360 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1f499269-4475-44ec-8b84-4979a96f2412-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.057403 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cttvp\" (UniqueName: \"kubernetes.io/projected/1f499269-4475-44ec-8b84-4979a96f2412-kube-api-access-cttvp\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.407296 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-xl79j" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.407301 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-xl79j" event={"ID":"1f499269-4475-44ec-8b84-4979a96f2412","Type":"ContainerDied","Data":"16334d2a0ede3b09ae857748dd9a40a9d33d28b5087964dd99a1959f5e6e1de7"} Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.407370 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="16334d2a0ede3b09ae857748dd9a40a9d33d28b5087964dd99a1959f5e6e1de7" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.890265 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.975216 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpwtw\" (UniqueName: \"kubernetes.io/projected/14d26833-10fc-4fa3-9dfd-a0497e5dc238-kube-api-access-lpwtw\") pod \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.975282 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/14d26833-10fc-4fa3-9dfd-a0497e5dc238-operator-scripts\") pod \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\" (UID: \"14d26833-10fc-4fa3-9dfd-a0497e5dc238\") " Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.975968 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/14d26833-10fc-4fa3-9dfd-a0497e5dc238-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "14d26833-10fc-4fa3-9dfd-a0497e5dc238" (UID: "14d26833-10fc-4fa3-9dfd-a0497e5dc238"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:20 crc kubenswrapper[5010]: I1126 17:04:20.981393 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14d26833-10fc-4fa3-9dfd-a0497e5dc238-kube-api-access-lpwtw" (OuterVolumeSpecName: "kube-api-access-lpwtw") pod "14d26833-10fc-4fa3-9dfd-a0497e5dc238" (UID: "14d26833-10fc-4fa3-9dfd-a0497e5dc238"). InnerVolumeSpecName "kube-api-access-lpwtw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.077400 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpwtw\" (UniqueName: \"kubernetes.io/projected/14d26833-10fc-4fa3-9dfd-a0497e5dc238-kube-api-access-lpwtw\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.077443 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/14d26833-10fc-4fa3-9dfd-a0497e5dc238-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.427468 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b021-account-create-update-tz28t" event={"ID":"14d26833-10fc-4fa3-9dfd-a0497e5dc238","Type":"ContainerDied","Data":"cb4cd68b126ca1a64218019e477843e18f0a6afc9096c7444fd228b43556281c"} Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.427525 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb4cd68b126ca1a64218019e477843e18f0a6afc9096c7444fd228b43556281c" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.428538 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b021-account-create-update-tz28t" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.886888 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nzqw8"] Nov 26 17:04:21 crc kubenswrapper[5010]: E1126 17:04:21.887738 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14d26833-10fc-4fa3-9dfd-a0497e5dc238" containerName="mariadb-account-create-update" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.887761 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="14d26833-10fc-4fa3-9dfd-a0497e5dc238" containerName="mariadb-account-create-update" Nov 26 17:04:21 crc kubenswrapper[5010]: E1126 17:04:21.887784 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f499269-4475-44ec-8b84-4979a96f2412" containerName="mariadb-database-create" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.887792 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f499269-4475-44ec-8b84-4979a96f2412" containerName="mariadb-database-create" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.888002 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="14d26833-10fc-4fa3-9dfd-a0497e5dc238" containerName="mariadb-account-create-update" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.888038 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f499269-4475-44ec-8b84-4979a96f2412" containerName="mariadb-database-create" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.889831 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.908532 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nzqw8"] Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.993831 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-utilities\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.993887 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wl6l9\" (UniqueName: \"kubernetes.io/projected/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-kube-api-access-wl6l9\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:21 crc kubenswrapper[5010]: I1126 17:04:21.993911 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-catalog-content\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.095925 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wl6l9\" (UniqueName: \"kubernetes.io/projected/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-kube-api-access-wl6l9\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.095983 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-catalog-content\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.096137 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-utilities\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.096498 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-catalog-content\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.096514 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-utilities\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.116486 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-wl6l9\" (UniqueName: \"kubernetes.io/projected/8eb4c74c-64c1-41b6-ae72-dd032b17bd3e-kube-api-access-wl6l9\") pod \"community-operators-nzqw8\" (UID: \"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e\") " pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.213423 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:22 crc kubenswrapper[5010]: I1126 17:04:22.718931 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nzqw8"] Nov 26 17:04:23 crc kubenswrapper[5010]: I1126 17:04:23.455670 5010 generic.go:334] "Generic (PLEG): container finished" podID="8eb4c74c-64c1-41b6-ae72-dd032b17bd3e" containerID="ce9afdfa2ef4ce16a23a06c7ba4303fa81c3eed648bb15793d2faf1725c6e4c6" exitCode=0 Nov 26 17:04:23 crc kubenswrapper[5010]: I1126 17:04:23.455910 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nzqw8" event={"ID":"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e","Type":"ContainerDied","Data":"ce9afdfa2ef4ce16a23a06c7ba4303fa81c3eed648bb15793d2faf1725c6e4c6"} Nov 26 17:04:23 crc kubenswrapper[5010]: I1126 17:04:23.456059 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nzqw8" event={"ID":"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e","Type":"ContainerStarted","Data":"e6f63e8eb27c1183aa659af7d93ead4bd81e1c5f37be49dd95f42aa9dd633d3c"} Nov 26 17:04:23 crc kubenswrapper[5010]: I1126 17:04:23.458200 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:04:25 crc kubenswrapper[5010]: I1126 17:04:25.893667 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:04:25 crc kubenswrapper[5010]: E1126 17:04:25.894215 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.863504 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-qzk97"] Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.865232 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.867899 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-pmnxt" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.868004 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.868112 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.877024 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qzk97"] Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.992334 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4545040-6d0b-4c50-87bf-7963256037cd-etc-machine-id\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.992415 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-db-sync-config-data\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.993003 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7s6l\" (UniqueName: \"kubernetes.io/projected/d4545040-6d0b-4c50-87bf-7963256037cd-kube-api-access-l7s6l\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.993113 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-scripts\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.993136 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-config-data\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:26 crc kubenswrapper[5010]: I1126 17:04:26.993469 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-combined-ca-bundle\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095290 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7s6l\" (UniqueName: \"kubernetes.io/projected/d4545040-6d0b-4c50-87bf-7963256037cd-kube-api-access-l7s6l\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095356 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-scripts\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095388 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-config-data\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095479 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-combined-ca-bundle\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095544 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4545040-6d0b-4c50-87bf-7963256037cd-etc-machine-id\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095573 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-db-sync-config-data\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.095655 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4545040-6d0b-4c50-87bf-7963256037cd-etc-machine-id\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.100924 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-combined-ca-bundle\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.100925 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-scripts\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.105460 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-db-sync-config-data\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.113570 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-config-data\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " 
pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.119376 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7s6l\" (UniqueName: \"kubernetes.io/projected/d4545040-6d0b-4c50-87bf-7963256037cd-kube-api-access-l7s6l\") pod \"cinder-db-sync-qzk97\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.188561 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:27 crc kubenswrapper[5010]: I1126 17:04:27.727444 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qzk97"] Nov 26 17:04:27 crc kubenswrapper[5010]: W1126 17:04:27.739873 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4545040_6d0b_4c50_87bf_7963256037cd.slice/crio-1df000c1e5f8d8ec1c3c62891d6a126e0b925c5dc5e48faef451e344f6d69358 WatchSource:0}: Error finding container 1df000c1e5f8d8ec1c3c62891d6a126e0b925c5dc5e48faef451e344f6d69358: Status 404 returned error can't find the container with id 1df000c1e5f8d8ec1c3c62891d6a126e0b925c5dc5e48faef451e344f6d69358 Nov 26 17:04:28 crc kubenswrapper[5010]: I1126 17:04:28.509220 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qzk97" event={"ID":"d4545040-6d0b-4c50-87bf-7963256037cd","Type":"ContainerStarted","Data":"368465e7c170c2129d8e4403b53f6d879901aa871f87407b0624efcffd21b987"} Nov 26 17:04:28 crc kubenswrapper[5010]: I1126 17:04:28.509642 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qzk97" event={"ID":"d4545040-6d0b-4c50-87bf-7963256037cd","Type":"ContainerStarted","Data":"1df000c1e5f8d8ec1c3c62891d6a126e0b925c5dc5e48faef451e344f6d69358"} Nov 26 17:04:28 crc kubenswrapper[5010]: I1126 17:04:28.515375 5010 generic.go:334] "Generic (PLEG): container finished" podID="8eb4c74c-64c1-41b6-ae72-dd032b17bd3e" containerID="0b1ca20beb402867587f91571366bcdc147791673d8bedf1e254b023b4ef5318" exitCode=0 Nov 26 17:04:28 crc kubenswrapper[5010]: I1126 17:04:28.515422 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nzqw8" event={"ID":"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e","Type":"ContainerDied","Data":"0b1ca20beb402867587f91571366bcdc147791673d8bedf1e254b023b4ef5318"} Nov 26 17:04:28 crc kubenswrapper[5010]: I1126 17:04:28.535992 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-qzk97" podStartSLOduration=2.535960232 podStartE2EDuration="2.535960232s" podCreationTimestamp="2025-11-26 17:04:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:04:28.52824131 +0000 UTC m=+5889.318958458" watchObservedRunningTime="2025-11-26 17:04:28.535960232 +0000 UTC m=+5889.326677390" Nov 26 17:04:30 crc kubenswrapper[5010]: I1126 17:04:30.542485 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nzqw8" event={"ID":"8eb4c74c-64c1-41b6-ae72-dd032b17bd3e","Type":"ContainerStarted","Data":"f791c920777b434df353061c710a4bf041178517264b8c9a0ace7167ce5ecc9e"} Nov 26 17:04:30 crc kubenswrapper[5010]: I1126 17:04:30.583699 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nzqw8" 
podStartSLOduration=3.756182031 podStartE2EDuration="9.583675859s" podCreationTimestamp="2025-11-26 17:04:21 +0000 UTC" firstStartedPulling="2025-11-26 17:04:23.457965895 +0000 UTC m=+5884.248683043" lastFinishedPulling="2025-11-26 17:04:29.285459703 +0000 UTC m=+5890.076176871" observedRunningTime="2025-11-26 17:04:30.569152297 +0000 UTC m=+5891.359869505" watchObservedRunningTime="2025-11-26 17:04:30.583675859 +0000 UTC m=+5891.374393037" Nov 26 17:04:32 crc kubenswrapper[5010]: I1126 17:04:32.214414 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:32 crc kubenswrapper[5010]: I1126 17:04:32.215613 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:32 crc kubenswrapper[5010]: I1126 17:04:32.257184 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:32 crc kubenswrapper[5010]: I1126 17:04:32.568900 5010 generic.go:334] "Generic (PLEG): container finished" podID="d4545040-6d0b-4c50-87bf-7963256037cd" containerID="368465e7c170c2129d8e4403b53f6d879901aa871f87407b0624efcffd21b987" exitCode=0 Nov 26 17:04:32 crc kubenswrapper[5010]: I1126 17:04:32.569290 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qzk97" event={"ID":"d4545040-6d0b-4c50-87bf-7963256037cd","Type":"ContainerDied","Data":"368465e7c170c2129d8e4403b53f6d879901aa871f87407b0624efcffd21b987"} Nov 26 17:04:33 crc kubenswrapper[5010]: I1126 17:04:33.879018 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.031221 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-config-data\") pod \"d4545040-6d0b-4c50-87bf-7963256037cd\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.031282 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-scripts\") pod \"d4545040-6d0b-4c50-87bf-7963256037cd\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.031340 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4545040-6d0b-4c50-87bf-7963256037cd-etc-machine-id\") pod \"d4545040-6d0b-4c50-87bf-7963256037cd\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.031385 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-db-sync-config-data\") pod \"d4545040-6d0b-4c50-87bf-7963256037cd\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.031409 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-combined-ca-bundle\") pod \"d4545040-6d0b-4c50-87bf-7963256037cd\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " Nov 26 17:04:34 crc kubenswrapper[5010]: 
I1126 17:04:34.031449 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d4545040-6d0b-4c50-87bf-7963256037cd-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d4545040-6d0b-4c50-87bf-7963256037cd" (UID: "d4545040-6d0b-4c50-87bf-7963256037cd"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.031482 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7s6l\" (UniqueName: \"kubernetes.io/projected/d4545040-6d0b-4c50-87bf-7963256037cd-kube-api-access-l7s6l\") pod \"d4545040-6d0b-4c50-87bf-7963256037cd\" (UID: \"d4545040-6d0b-4c50-87bf-7963256037cd\") " Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.032190 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d4545040-6d0b-4c50-87bf-7963256037cd-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.037595 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-scripts" (OuterVolumeSpecName: "scripts") pod "d4545040-6d0b-4c50-87bf-7963256037cd" (UID: "d4545040-6d0b-4c50-87bf-7963256037cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.038999 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d4545040-6d0b-4c50-87bf-7963256037cd" (UID: "d4545040-6d0b-4c50-87bf-7963256037cd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.041040 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4545040-6d0b-4c50-87bf-7963256037cd-kube-api-access-l7s6l" (OuterVolumeSpecName: "kube-api-access-l7s6l") pod "d4545040-6d0b-4c50-87bf-7963256037cd" (UID: "d4545040-6d0b-4c50-87bf-7963256037cd"). InnerVolumeSpecName "kube-api-access-l7s6l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.077059 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d4545040-6d0b-4c50-87bf-7963256037cd" (UID: "d4545040-6d0b-4c50-87bf-7963256037cd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.082104 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-config-data" (OuterVolumeSpecName: "config-data") pod "d4545040-6d0b-4c50-87bf-7963256037cd" (UID: "d4545040-6d0b-4c50-87bf-7963256037cd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.134004 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7s6l\" (UniqueName: \"kubernetes.io/projected/d4545040-6d0b-4c50-87bf-7963256037cd-kube-api-access-l7s6l\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.134044 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.134059 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.134073 5010 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.134085 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4545040-6d0b-4c50-87bf-7963256037cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.587119 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qzk97" event={"ID":"d4545040-6d0b-4c50-87bf-7963256037cd","Type":"ContainerDied","Data":"1df000c1e5f8d8ec1c3c62891d6a126e0b925c5dc5e48faef451e344f6d69358"} Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.587158 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1df000c1e5f8d8ec1c3c62891d6a126e0b925c5dc5e48faef451e344f6d69358" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.587182 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qzk97" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.955689 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg"] Nov 26 17:04:34 crc kubenswrapper[5010]: E1126 17:04:34.957107 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4545040-6d0b-4c50-87bf-7963256037cd" containerName="cinder-db-sync" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.957138 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4545040-6d0b-4c50-87bf-7963256037cd" containerName="cinder-db-sync" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.957355 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4545040-6d0b-4c50-87bf-7963256037cd" containerName="cinder-db-sync" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.958463 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:34 crc kubenswrapper[5010]: I1126 17:04:34.996348 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg"] Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.058273 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-config\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.058325 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-nb\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.058382 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-dns-svc\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.058406 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-sb\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.058599 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbzvp\" (UniqueName: \"kubernetes.io/projected/f4e11ddd-5ed3-4e07-bd88-d132154296e0-kube-api-access-qbzvp\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.159825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbzvp\" (UniqueName: \"kubernetes.io/projected/f4e11ddd-5ed3-4e07-bd88-d132154296e0-kube-api-access-qbzvp\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.160196 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-config\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.160220 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-nb\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.160255 5010 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-dns-svc\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.160273 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-sb\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.161104 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-sb\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.161470 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-config\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.161542 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-nb\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.161641 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-dns-svc\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.178827 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbzvp\" (UniqueName: \"kubernetes.io/projected/f4e11ddd-5ed3-4e07-bd88-d132154296e0-kube-api-access-qbzvp\") pod \"dnsmasq-dns-7bfbdbfc4c-cmhwg\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.272513 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.281168 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.283553 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.293543 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-pmnxt" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.293864 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.294141 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.294373 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.299670 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373549 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373623 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data-custom\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373647 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkm9c\" (UniqueName: \"kubernetes.io/projected/d3cb5859-80d9-40e5-a96c-18e36dd42c93-kube-api-access-nkm9c\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373690 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3cb5859-80d9-40e5-a96c-18e36dd42c93-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373764 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3cb5859-80d9-40e5-a96c-18e36dd42c93-logs\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373818 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.373863 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-scripts\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475665 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3cb5859-80d9-40e5-a96c-18e36dd42c93-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475781 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3cb5859-80d9-40e5-a96c-18e36dd42c93-logs\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475837 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3cb5859-80d9-40e5-a96c-18e36dd42c93-etc-machine-id\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475857 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475922 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-scripts\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475958 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.475990 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data-custom\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.476018 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkm9c\" (UniqueName: \"kubernetes.io/projected/d3cb5859-80d9-40e5-a96c-18e36dd42c93-kube-api-access-nkm9c\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.476499 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3cb5859-80d9-40e5-a96c-18e36dd42c93-logs\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.481984 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.482159 5010 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-scripts\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.486486 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.495292 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data-custom\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.499195 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkm9c\" (UniqueName: \"kubernetes.io/projected/d3cb5859-80d9-40e5-a96c-18e36dd42c93-kube-api-access-nkm9c\") pod \"cinder-api-0\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.610298 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:04:35 crc kubenswrapper[5010]: I1126 17:04:35.823372 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg"] Nov 26 17:04:36 crc kubenswrapper[5010]: I1126 17:04:36.107971 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:04:36 crc kubenswrapper[5010]: W1126 17:04:36.132165 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd3cb5859_80d9_40e5_a96c_18e36dd42c93.slice/crio-43567efb4d08ad0c859a2c4f09f5a21e316eb9ae1ebebf44432397a845e0deca WatchSource:0}: Error finding container 43567efb4d08ad0c859a2c4f09f5a21e316eb9ae1ebebf44432397a845e0deca: Status 404 returned error can't find the container with id 43567efb4d08ad0c859a2c4f09f5a21e316eb9ae1ebebf44432397a845e0deca Nov 26 17:04:36 crc kubenswrapper[5010]: I1126 17:04:36.632504 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3cb5859-80d9-40e5-a96c-18e36dd42c93","Type":"ContainerStarted","Data":"43567efb4d08ad0c859a2c4f09f5a21e316eb9ae1ebebf44432397a845e0deca"} Nov 26 17:04:36 crc kubenswrapper[5010]: I1126 17:04:36.638375 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerID="f934f33c7c76791a952a6149276975bb54df481233b95422be96189ca09f351d" exitCode=0 Nov 26 17:04:36 crc kubenswrapper[5010]: I1126 17:04:36.638418 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" event={"ID":"f4e11ddd-5ed3-4e07-bd88-d132154296e0","Type":"ContainerDied","Data":"f934f33c7c76791a952a6149276975bb54df481233b95422be96189ca09f351d"} Nov 26 17:04:36 crc kubenswrapper[5010]: I1126 17:04:36.638443 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" event={"ID":"f4e11ddd-5ed3-4e07-bd88-d132154296e0","Type":"ContainerStarted","Data":"8344b13fc745a337c966991a83bc20b683c3b9d6dc412bf81c51a852beb77e33"} Nov 26 17:04:36 crc kubenswrapper[5010]: I1126 17:04:36.980267 5010 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.649879 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" event={"ID":"f4e11ddd-5ed3-4e07-bd88-d132154296e0","Type":"ContainerStarted","Data":"81f63b3382fa1f6590b66cd8c7ec7644264a36611d2990f8226e8e1bda34f2e3"} Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.650248 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.652598 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3cb5859-80d9-40e5-a96c-18e36dd42c93","Type":"ContainerStarted","Data":"31d983b7c2e9337a69a2536624a78e0e58f7766ab308094b198372c43d8f2583"} Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.652642 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3cb5859-80d9-40e5-a96c-18e36dd42c93","Type":"ContainerStarted","Data":"38f6bee1ab9abb44eaaf814c67d89a1ee3233e4c4ae7008335c903cfa3cde9fc"} Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.652788 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api-log" containerID="cri-o://38f6bee1ab9abb44eaaf814c67d89a1ee3233e4c4ae7008335c903cfa3cde9fc" gracePeriod=30 Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.652859 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.652865 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api" containerID="cri-o://31d983b7c2e9337a69a2536624a78e0e58f7766ab308094b198372c43d8f2583" gracePeriod=30 Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.671660 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" podStartSLOduration=3.671641013 podStartE2EDuration="3.671641013s" podCreationTimestamp="2025-11-26 17:04:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:04:37.671076669 +0000 UTC m=+5898.461793827" watchObservedRunningTime="2025-11-26 17:04:37.671641013 +0000 UTC m=+5898.462358191" Nov 26 17:04:37 crc kubenswrapper[5010]: I1126 17:04:37.698869 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.69884666 podStartE2EDuration="2.69884666s" podCreationTimestamp="2025-11-26 17:04:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:04:37.69081849 +0000 UTC m=+5898.481535648" watchObservedRunningTime="2025-11-26 17:04:37.69884666 +0000 UTC m=+5898.489563808" Nov 26 17:04:38 crc kubenswrapper[5010]: I1126 17:04:38.665636 5010 generic.go:334] "Generic (PLEG): container finished" podID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerID="38f6bee1ab9abb44eaaf814c67d89a1ee3233e4c4ae7008335c903cfa3cde9fc" exitCode=143 Nov 26 17:04:38 crc kubenswrapper[5010]: I1126 17:04:38.665752 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"d3cb5859-80d9-40e5-a96c-18e36dd42c93","Type":"ContainerDied","Data":"38f6bee1ab9abb44eaaf814c67d89a1ee3233e4c4ae7008335c903cfa3cde9fc"} Nov 26 17:04:40 crc kubenswrapper[5010]: I1126 17:04:40.892240 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:04:40 crc kubenswrapper[5010]: E1126 17:04:40.893041 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.255557 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nzqw8" Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.318319 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nzqw8"] Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.364191 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mqwcz"] Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.364485 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mqwcz" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="registry-server" containerID="cri-o://4cdcadc4c51fdcb805528a5f52d9bc52363241f36af0106bb2cee4bae95ceb61" gracePeriod=2 Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.711398 5010 generic.go:334] "Generic (PLEG): container finished" podID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerID="4cdcadc4c51fdcb805528a5f52d9bc52363241f36af0106bb2cee4bae95ceb61" exitCode=0 Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.712240 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerDied","Data":"4cdcadc4c51fdcb805528a5f52d9bc52363241f36af0106bb2cee4bae95ceb61"} Nov 26 17:04:42 crc kubenswrapper[5010]: I1126 17:04:42.905502 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.025021 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7vzww\" (UniqueName: \"kubernetes.io/projected/90967318-80f9-4d7a-81f5-78978bc25ab8-kube-api-access-7vzww\") pod \"90967318-80f9-4d7a-81f5-78978bc25ab8\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.025113 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-utilities\") pod \"90967318-80f9-4d7a-81f5-78978bc25ab8\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.025135 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-catalog-content\") pod \"90967318-80f9-4d7a-81f5-78978bc25ab8\" (UID: \"90967318-80f9-4d7a-81f5-78978bc25ab8\") " Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.032739 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90967318-80f9-4d7a-81f5-78978bc25ab8-kube-api-access-7vzww" (OuterVolumeSpecName: "kube-api-access-7vzww") pod "90967318-80f9-4d7a-81f5-78978bc25ab8" (UID: "90967318-80f9-4d7a-81f5-78978bc25ab8"). InnerVolumeSpecName "kube-api-access-7vzww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.033456 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-utilities" (OuterVolumeSpecName: "utilities") pod "90967318-80f9-4d7a-81f5-78978bc25ab8" (UID: "90967318-80f9-4d7a-81f5-78978bc25ab8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.079810 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90967318-80f9-4d7a-81f5-78978bc25ab8" (UID: "90967318-80f9-4d7a-81f5-78978bc25ab8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.132757 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.132790 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90967318-80f9-4d7a-81f5-78978bc25ab8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.132806 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7vzww\" (UniqueName: \"kubernetes.io/projected/90967318-80f9-4d7a-81f5-78978bc25ab8-kube-api-access-7vzww\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.725884 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mqwcz" event={"ID":"90967318-80f9-4d7a-81f5-78978bc25ab8","Type":"ContainerDied","Data":"0fc226de84263ab753dfab427261a9a7e4b254805844730d3022fe3e37171797"} Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.726259 5010 scope.go:117] "RemoveContainer" containerID="4cdcadc4c51fdcb805528a5f52d9bc52363241f36af0106bb2cee4bae95ceb61" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.726053 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mqwcz" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.772195 5010 scope.go:117] "RemoveContainer" containerID="76d8ddb9385df6e6a13e2ee4a4de5037115eeb9aa34d8ec021d6534ece00a5c8" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.786958 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mqwcz"] Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.796015 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mqwcz"] Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.804969 5010 scope.go:117] "RemoveContainer" containerID="a47fba56f4f89d049a9c9942ba981bcb043b0ce77b2c8bfcaf81126f9ff6be1b" Nov 26 17:04:43 crc kubenswrapper[5010]: I1126 17:04:43.902299 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" path="/var/lib/kubelet/pods/90967318-80f9-4d7a-81f5-78978bc25ab8/volumes" Nov 26 17:04:45 crc kubenswrapper[5010]: I1126 17:04:45.283735 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:04:45 crc kubenswrapper[5010]: I1126 17:04:45.351569 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cd9fddc67-q99jj"] Nov 26 17:04:45 crc kubenswrapper[5010]: I1126 17:04:45.352177 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="dnsmasq-dns" containerID="cri-o://036bb9672543db02139a86d6101478dafac1fd93ce3d6598e5d61c794af593f7" gracePeriod=10 Nov 26 17:04:45 crc kubenswrapper[5010]: I1126 17:04:45.471479 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.59:5353: connect: connection refused" Nov 
26 17:04:45 crc kubenswrapper[5010]: I1126 17:04:45.756408 5010 generic.go:334] "Generic (PLEG): container finished" podID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerID="036bb9672543db02139a86d6101478dafac1fd93ce3d6598e5d61c794af593f7" exitCode=0 Nov 26 17:04:45 crc kubenswrapper[5010]: I1126 17:04:45.756467 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" event={"ID":"9fbe6bde-b8d7-4c50-b149-b22bb8406db7","Type":"ContainerDied","Data":"036bb9672543db02139a86d6101478dafac1fd93ce3d6598e5d61c794af593f7"} Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.418000 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.598547 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb\") pod \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.599192 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-dns-svc\") pod \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.599297 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-config\") pod \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.599331 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7v25c\" (UniqueName: \"kubernetes.io/projected/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-kube-api-access-7v25c\") pod \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.599416 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-nb\") pod \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.607004 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-kube-api-access-7v25c" (OuterVolumeSpecName: "kube-api-access-7v25c") pod "9fbe6bde-b8d7-4c50-b149-b22bb8406db7" (UID: "9fbe6bde-b8d7-4c50-b149-b22bb8406db7"). InnerVolumeSpecName "kube-api-access-7v25c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.659507 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9fbe6bde-b8d7-4c50-b149-b22bb8406db7" (UID: "9fbe6bde-b8d7-4c50-b149-b22bb8406db7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.661639 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-config" (OuterVolumeSpecName: "config") pod "9fbe6bde-b8d7-4c50-b149-b22bb8406db7" (UID: "9fbe6bde-b8d7-4c50-b149-b22bb8406db7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:46 crc kubenswrapper[5010]: E1126 17:04:46.679183 5010 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb podName:9fbe6bde-b8d7-4c50-b149-b22bb8406db7 nodeName:}" failed. No retries permitted until 2025-11-26 17:04:47.179153716 +0000 UTC m=+5907.969870864 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "ovsdbserver-sb" (UniqueName: "kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb") pod "9fbe6bde-b8d7-4c50-b149-b22bb8406db7" (UID: "9fbe6bde-b8d7-4c50-b149-b22bb8406db7") : error deleting /var/lib/kubelet/pods/9fbe6bde-b8d7-4c50-b149-b22bb8406db7/volume-subpaths: remove /var/lib/kubelet/pods/9fbe6bde-b8d7-4c50-b149-b22bb8406db7/volume-subpaths: no such file or directory Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.679387 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9fbe6bde-b8d7-4c50-b149-b22bb8406db7" (UID: "9fbe6bde-b8d7-4c50-b149-b22bb8406db7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.702182 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.702238 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.702254 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7v25c\" (UniqueName: \"kubernetes.io/projected/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-kube-api-access-7v25c\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.702268 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.766600 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" event={"ID":"9fbe6bde-b8d7-4c50-b149-b22bb8406db7","Type":"ContainerDied","Data":"a9944eeb6226e9897a009fee0946965fe55cdb18695c0773184aa2658503431d"} Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.766658 5010 scope.go:117] "RemoveContainer" containerID="036bb9672543db02139a86d6101478dafac1fd93ce3d6598e5d61c794af593f7" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.766729 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6cd9fddc67-q99jj" Nov 26 17:04:46 crc kubenswrapper[5010]: I1126 17:04:46.792969 5010 scope.go:117] "RemoveContainer" containerID="fd0e2dd75a5a9f2b7d32d1aebdf972cd205cab5a93b38edb96cd52bd55b2ebba" Nov 26 17:04:47 crc kubenswrapper[5010]: I1126 17:04:47.211996 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb\") pod \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\" (UID: \"9fbe6bde-b8d7-4c50-b149-b22bb8406db7\") " Nov 26 17:04:47 crc kubenswrapper[5010]: I1126 17:04:47.212523 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9fbe6bde-b8d7-4c50-b149-b22bb8406db7" (UID: "9fbe6bde-b8d7-4c50-b149-b22bb8406db7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:04:47 crc kubenswrapper[5010]: I1126 17:04:47.313916 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9fbe6bde-b8d7-4c50-b149-b22bb8406db7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:04:47 crc kubenswrapper[5010]: I1126 17:04:47.396832 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6cd9fddc67-q99jj"] Nov 26 17:04:47 crc kubenswrapper[5010]: I1126 17:04:47.432368 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6cd9fddc67-q99jj"] Nov 26 17:04:47 crc kubenswrapper[5010]: I1126 17:04:47.906282 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" path="/var/lib/kubelet/pods/9fbe6bde-b8d7-4c50-b149-b22bb8406db7/volumes" Nov 26 17:04:48 crc kubenswrapper[5010]: I1126 17:04:48.041452 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 17:04:51 crc kubenswrapper[5010]: I1126 17:04:51.892295 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:04:51 crc kubenswrapper[5010]: E1126 17:04:51.892950 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:05:05 crc kubenswrapper[5010]: I1126 17:05:05.892551 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:05:05 crc kubenswrapper[5010]: E1126 17:05:05.893788 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:05:05 crc kubenswrapper[5010]: I1126 17:05:05.956120 5010 scope.go:117] "RemoveContainer" 
containerID="507ccf248c0ca3a91f14c401102529f83801842c5b0f81464d0c00c40ffa8b3d" Nov 26 17:05:07 crc kubenswrapper[5010]: I1126 17:05:07.960457 5010 generic.go:334] "Generic (PLEG): container finished" podID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerID="31d983b7c2e9337a69a2536624a78e0e58f7766ab308094b198372c43d8f2583" exitCode=137 Nov 26 17:05:07 crc kubenswrapper[5010]: I1126 17:05:07.960649 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3cb5859-80d9-40e5-a96c-18e36dd42c93","Type":"ContainerDied","Data":"31d983b7c2e9337a69a2536624a78e0e58f7766ab308094b198372c43d8f2583"} Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.110414 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190012 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190095 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3cb5859-80d9-40e5-a96c-18e36dd42c93-etc-machine-id\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190143 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data-custom\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190208 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-scripts\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190273 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-combined-ca-bundle\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190325 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3cb5859-80d9-40e5-a96c-18e36dd42c93-logs\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190374 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkm9c\" (UniqueName: \"kubernetes.io/projected/d3cb5859-80d9-40e5-a96c-18e36dd42c93-kube-api-access-nkm9c\") pod \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\" (UID: \"d3cb5859-80d9-40e5-a96c-18e36dd42c93\") " Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.191144 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d3cb5859-80d9-40e5-a96c-18e36dd42c93-logs" (OuterVolumeSpecName: "logs") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: 
"d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.190839 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3cb5859-80d9-40e5-a96c-18e36dd42c93-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: "d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.195642 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: "d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.198062 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-scripts" (OuterVolumeSpecName: "scripts") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: "d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.200826 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3cb5859-80d9-40e5-a96c-18e36dd42c93-kube-api-access-nkm9c" (OuterVolumeSpecName: "kube-api-access-nkm9c") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: "d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "kube-api-access-nkm9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.228484 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: "d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.260576 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data" (OuterVolumeSpecName: "config-data") pod "d3cb5859-80d9-40e5-a96c-18e36dd42c93" (UID: "d3cb5859-80d9-40e5-a96c-18e36dd42c93"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292186 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292215 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d3cb5859-80d9-40e5-a96c-18e36dd42c93-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292227 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292235 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292244 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d3cb5859-80d9-40e5-a96c-18e36dd42c93-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292252 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d3cb5859-80d9-40e5-a96c-18e36dd42c93-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.292260 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkm9c\" (UniqueName: \"kubernetes.io/projected/d3cb5859-80d9-40e5-a96c-18e36dd42c93-kube-api-access-nkm9c\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.970236 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"d3cb5859-80d9-40e5-a96c-18e36dd42c93","Type":"ContainerDied","Data":"43567efb4d08ad0c859a2c4f09f5a21e316eb9ae1ebebf44432397a845e0deca"} Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.970277 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.970283 5010 scope.go:117] "RemoveContainer" containerID="31d983b7c2e9337a69a2536624a78e0e58f7766ab308094b198372c43d8f2583" Nov 26 17:05:08 crc kubenswrapper[5010]: I1126 17:05:08.999918 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.003855 5010 scope.go:117] "RemoveContainer" containerID="38f6bee1ab9abb44eaaf814c67d89a1ee3233e4c4ae7008335c903cfa3cde9fc" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.016605 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028200 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028655 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028676 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028723 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="dnsmasq-dns" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028733 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="dnsmasq-dns" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028757 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="extract-content" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028765 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="extract-content" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028784 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="registry-server" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028793 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="registry-server" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028819 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="extract-utilities" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028828 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="extract-utilities" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028846 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api-log" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028854 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api-log" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.028864 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="init" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.028873 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="init" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.029071 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api-log" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.029100 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fbe6bde-b8d7-4c50-b149-b22bb8406db7" containerName="dnsmasq-dns" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.029115 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="90967318-80f9-4d7a-81f5-78978bc25ab8" containerName="registry-server" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.029129 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" containerName="cinder-api" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.030274 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.032485 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.032657 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.032848 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-pmnxt" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.033309 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.033544 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.033828 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.037087 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.104497 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data-custom\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.104646 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/427b755e-a242-4666-9450-58a48dfec489-etc-machine-id\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.104806 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cspg\" (UniqueName: \"kubernetes.io/projected/427b755e-a242-4666-9450-58a48dfec489-kube-api-access-5cspg\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.104864 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.104976 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/427b755e-a242-4666-9450-58a48dfec489-logs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.105104 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-scripts\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.105130 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-public-tls-certs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.105163 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.105261 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207373 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cspg\" (UniqueName: \"kubernetes.io/projected/427b755e-a242-4666-9450-58a48dfec489-kube-api-access-5cspg\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207442 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207501 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/427b755e-a242-4666-9450-58a48dfec489-logs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207570 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-scripts\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207595 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-public-tls-certs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207620 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207674 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207736 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data-custom\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207778 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/427b755e-a242-4666-9450-58a48dfec489-etc-machine-id\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.207877 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/427b755e-a242-4666-9450-58a48dfec489-etc-machine-id\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.208925 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/427b755e-a242-4666-9450-58a48dfec489-logs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.212329 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-scripts\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.212557 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data-custom\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.213094 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.217520 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.218354 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-public-tls-certs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.218842 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.228619 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cspg\" (UniqueName: \"kubernetes.io/projected/427b755e-a242-4666-9450-58a48dfec489-kube-api-access-5cspg\") pod \"cinder-api-0\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.360675 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:09 crc kubenswrapper[5010]: E1126 17:05:09.554519 5010 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.154:40332->38.102.83.154:42721: write tcp 38.102.83.154:40332->38.102.83.154:42721: write: connection reset by peer Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.810139 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.911066 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3cb5859-80d9-40e5-a96c-18e36dd42c93" path="/var/lib/kubelet/pods/d3cb5859-80d9-40e5-a96c-18e36dd42c93/volumes" Nov 26 17:05:09 crc kubenswrapper[5010]: I1126 17:05:09.994668 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"427b755e-a242-4666-9450-58a48dfec489","Type":"ContainerStarted","Data":"02fd78a2dc076e82f5d0c46e8ef3f39b49211bce27ec054f8afa00fd755d01b0"} Nov 26 17:05:11 crc kubenswrapper[5010]: I1126 17:05:11.008647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"427b755e-a242-4666-9450-58a48dfec489","Type":"ContainerStarted","Data":"767d5af8c78c49595d59318126064f71807fe662f0c35f48ca829aa02ccb6127"} Nov 26 17:05:11 crc kubenswrapper[5010]: I1126 17:05:11.009082 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"427b755e-a242-4666-9450-58a48dfec489","Type":"ContainerStarted","Data":"928e2f5280883a6c7345ccc2a95bf74bfa4488e2be874d8796ae2bf3bbbf3cab"} Nov 26 17:05:11 crc kubenswrapper[5010]: I1126 17:05:11.009107 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 17:05:11 crc kubenswrapper[5010]: I1126 17:05:11.050336 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.050315893 podStartE2EDuration="2.050315893s" podCreationTimestamp="2025-11-26 17:05:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 
UTC" observedRunningTime="2025-11-26 17:05:11.026022658 +0000 UTC m=+5931.816739806" watchObservedRunningTime="2025-11-26 17:05:11.050315893 +0000 UTC m=+5931.841033041" Nov 26 17:05:18 crc kubenswrapper[5010]: I1126 17:05:18.892891 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:05:18 crc kubenswrapper[5010]: E1126 17:05:18.893958 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:05:21 crc kubenswrapper[5010]: I1126 17:05:21.433568 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 17:05:29 crc kubenswrapper[5010]: I1126 17:05:29.899136 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:05:29 crc kubenswrapper[5010]: E1126 17:05:29.899926 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.570806 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rvhnl"] Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.574932 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.587981 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rvhnl"] Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.632399 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpsgx\" (UniqueName: \"kubernetes.io/projected/04531e6c-a168-4a3f-acc5-43e1411f2ed1-kube-api-access-kpsgx\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.632555 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-utilities\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.632686 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-catalog-content\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.734348 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-catalog-content\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.734501 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpsgx\" (UniqueName: \"kubernetes.io/projected/04531e6c-a168-4a3f-acc5-43e1411f2ed1-kube-api-access-kpsgx\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.734566 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-utilities\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.734980 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-catalog-content\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.735013 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-utilities\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.756769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-kpsgx\" (UniqueName: \"kubernetes.io/projected/04531e6c-a168-4a3f-acc5-43e1411f2ed1-kube-api-access-kpsgx\") pod \"redhat-operators-rvhnl\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:31 crc kubenswrapper[5010]: I1126 17:05:31.899399 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:32 crc kubenswrapper[5010]: I1126 17:05:32.383585 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rvhnl"] Nov 26 17:05:33 crc kubenswrapper[5010]: I1126 17:05:33.220245 5010 generic.go:334] "Generic (PLEG): container finished" podID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerID="18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b" exitCode=0 Nov 26 17:05:33 crc kubenswrapper[5010]: I1126 17:05:33.220302 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerDied","Data":"18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b"} Nov 26 17:05:33 crc kubenswrapper[5010]: I1126 17:05:33.220702 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerStarted","Data":"0ecef578a3fb7f4740aed53561382061bd9f7f6db3ccc09ed02029db7b4f94d5"} Nov 26 17:05:35 crc kubenswrapper[5010]: I1126 17:05:35.241330 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerStarted","Data":"a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1"} Nov 26 17:05:36 crc kubenswrapper[5010]: I1126 17:05:36.255500 5010 generic.go:334] "Generic (PLEG): container finished" podID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerID="a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1" exitCode=0 Nov 26 17:05:36 crc kubenswrapper[5010]: I1126 17:05:36.255959 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerDied","Data":"a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1"} Nov 26 17:05:37 crc kubenswrapper[5010]: I1126 17:05:37.270141 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerStarted","Data":"7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e"} Nov 26 17:05:37 crc kubenswrapper[5010]: I1126 17:05:37.305042 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rvhnl" podStartSLOduration=2.528603566 podStartE2EDuration="6.30502281s" podCreationTimestamp="2025-11-26 17:05:31 +0000 UTC" firstStartedPulling="2025-11-26 17:05:33.223807391 +0000 UTC m=+5954.014524559" lastFinishedPulling="2025-11-26 17:05:37.000226625 +0000 UTC m=+5957.790943803" observedRunningTime="2025-11-26 17:05:37.296830816 +0000 UTC m=+5958.087548064" watchObservedRunningTime="2025-11-26 17:05:37.30502281 +0000 UTC m=+5958.095739948" Nov 26 17:05:41 crc kubenswrapper[5010]: I1126 17:05:41.905644 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 
17:05:41 crc kubenswrapper[5010]: I1126 17:05:41.906681 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.732552 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.738119 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.740425 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.757501 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.851813 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-scripts\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.851862 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.852271 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c7ca36b-3fe0-420b-b7fe-8420a1544075-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.852429 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.852542 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r72gc\" (UniqueName: \"kubernetes.io/projected/5c7ca36b-3fe0-420b-b7fe-8420a1544075-kube-api-access-r72gc\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.852613 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.891940 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.953797 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-scripts\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.954099 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.954193 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c7ca36b-3fe0-420b-b7fe-8420a1544075-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.954224 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.954251 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r72gc\" (UniqueName: \"kubernetes.io/projected/5c7ca36b-3fe0-420b-b7fe-8420a1544075-kube-api-access-r72gc\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.954300 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.955274 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c7ca36b-3fe0-420b-b7fe-8420a1544075-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.960733 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.961210 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.962105 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-scripts\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.965507 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.968733 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rvhnl" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="registry-server" probeResult="failure" output=< Nov 26 17:05:42 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:05:42 crc kubenswrapper[5010]: > Nov 26 17:05:42 crc kubenswrapper[5010]: I1126 17:05:42.971431 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r72gc\" (UniqueName: \"kubernetes.io/projected/5c7ca36b-3fe0-420b-b7fe-8420a1544075-kube-api-access-r72gc\") pod \"cinder-scheduler-0\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:43 crc kubenswrapper[5010]: I1126 17:05:43.067051 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 17:05:43 crc kubenswrapper[5010]: I1126 17:05:43.337382 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"4b598b46f2cf6c5daaf375b8d9dc8672aba51e2bbf338cbfbf04472a425972f5"} Nov 26 17:05:43 crc kubenswrapper[5010]: I1126 17:05:43.543241 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:43 crc kubenswrapper[5010]: W1126 17:05:43.549571 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c7ca36b_3fe0_420b_b7fe_8420a1544075.slice/crio-4bef4f29845cc5c24b410be4fd879d3d060d1873ae4d1df15de1a34c176176d2 WatchSource:0}: Error finding container 4bef4f29845cc5c24b410be4fd879d3d060d1873ae4d1df15de1a34c176176d2: Status 404 returned error can't find the container with id 4bef4f29845cc5c24b410be4fd879d3d060d1873ae4d1df15de1a34c176176d2 Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.062337 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.064565 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api" containerID="cri-o://767d5af8c78c49595d59318126064f71807fe662f0c35f48ca829aa02ccb6127" gracePeriod=30 Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.064525 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api-log" containerID="cri-o://928e2f5280883a6c7345ccc2a95bf74bfa4488e2be874d8796ae2bf3bbbf3cab" gracePeriod=30 Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.362180 5010 generic.go:334] "Generic (PLEG): container finished" podID="427b755e-a242-4666-9450-58a48dfec489" containerID="928e2f5280883a6c7345ccc2a95bf74bfa4488e2be874d8796ae2bf3bbbf3cab" exitCode=143 Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.362541 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"427b755e-a242-4666-9450-58a48dfec489","Type":"ContainerDied","Data":"928e2f5280883a6c7345ccc2a95bf74bfa4488e2be874d8796ae2bf3bbbf3cab"} Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.365071 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5c7ca36b-3fe0-420b-b7fe-8420a1544075","Type":"ContainerStarted","Data":"f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84"} Nov 26 17:05:44 crc kubenswrapper[5010]: I1126 17:05:44.365108 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5c7ca36b-3fe0-420b-b7fe-8420a1544075","Type":"ContainerStarted","Data":"4bef4f29845cc5c24b410be4fd879d3d060d1873ae4d1df15de1a34c176176d2"} Nov 26 17:05:45 crc kubenswrapper[5010]: I1126 17:05:45.376396 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5c7ca36b-3fe0-420b-b7fe-8420a1544075","Type":"ContainerStarted","Data":"6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af"} Nov 26 17:05:45 crc kubenswrapper[5010]: I1126 17:05:45.412461 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.412440024 podStartE2EDuration="3.412440024s" podCreationTimestamp="2025-11-26 17:05:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:05:45.398089507 +0000 UTC m=+5966.188806695" watchObservedRunningTime="2025-11-26 17:05:45.412440024 +0000 UTC m=+5966.203157172" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.224663 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.1.68:8776/healthcheck\": read tcp 10.217.0.2:58024->10.217.1.68:8776: read: connection reset by peer" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.397685 5010 generic.go:334] "Generic (PLEG): container finished" podID="427b755e-a242-4666-9450-58a48dfec489" containerID="767d5af8c78c49595d59318126064f71807fe662f0c35f48ca829aa02ccb6127" exitCode=0 Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.397800 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"427b755e-a242-4666-9450-58a48dfec489","Type":"ContainerDied","Data":"767d5af8c78c49595d59318126064f71807fe662f0c35f48ca829aa02ccb6127"} Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.698580 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.842916 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/427b755e-a242-4666-9450-58a48dfec489-logs\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.842961 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-public-tls-certs\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843025 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5cspg\" (UniqueName: \"kubernetes.io/projected/427b755e-a242-4666-9450-58a48dfec489-kube-api-access-5cspg\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843089 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843134 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-internal-tls-certs\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-scripts\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843200 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/427b755e-a242-4666-9450-58a48dfec489-etc-machine-id\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843248 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data-custom\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843269 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-combined-ca-bundle\") pod \"427b755e-a242-4666-9450-58a48dfec489\" (UID: \"427b755e-a242-4666-9450-58a48dfec489\") " Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843620 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/427b755e-a242-4666-9450-58a48dfec489-logs" (OuterVolumeSpecName: "logs") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). 
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.843699 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/427b755e-a242-4666-9450-58a48dfec489-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.849105 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-scripts" (OuterVolumeSpecName: "scripts") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.849413 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/427b755e-a242-4666-9450-58a48dfec489-kube-api-access-5cspg" (OuterVolumeSpecName: "kube-api-access-5cspg") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "kube-api-access-5cspg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.849913 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.878396 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.899338 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data" (OuterVolumeSpecName: "config-data") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.911982 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.919884 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "427b755e-a242-4666-9450-58a48dfec489" (UID: "427b755e-a242-4666-9450-58a48dfec489"). 
InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.945973 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946029 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946039 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946048 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/427b755e-a242-4666-9450-58a48dfec489-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946057 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946067 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946077 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/427b755e-a242-4666-9450-58a48dfec489-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946085 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/427b755e-a242-4666-9450-58a48dfec489-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:47 crc kubenswrapper[5010]: I1126 17:05:47.946095 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5cspg\" (UniqueName: \"kubernetes.io/projected/427b755e-a242-4666-9450-58a48dfec489-kube-api-access-5cspg\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.067678 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.408432 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"427b755e-a242-4666-9450-58a48dfec489","Type":"ContainerDied","Data":"02fd78a2dc076e82f5d0c46e8ef3f39b49211bce27ec054f8afa00fd755d01b0"} Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.408487 5010 scope.go:117] "RemoveContainer" containerID="767d5af8c78c49595d59318126064f71807fe662f0c35f48ca829aa02ccb6127" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.408543 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.487399 5010 scope.go:117] "RemoveContainer" containerID="928e2f5280883a6c7345ccc2a95bf74bfa4488e2be874d8796ae2bf3bbbf3cab" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.523184 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.558073 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.566520 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:48 crc kubenswrapper[5010]: E1126 17:05:48.567380 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.567405 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api" Nov 26 17:05:48 crc kubenswrapper[5010]: E1126 17:05:48.567424 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api-log" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.567435 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api-log" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.567601 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.567628 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="427b755e-a242-4666-9450-58a48dfec489" containerName="cinder-api-log" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.568600 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.571958 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.572415 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.572815 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.577751 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669114 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3308f215-a7a3-4810-bc0e-a6556edadf05-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669192 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3308f215-a7a3-4810-bc0e-a6556edadf05-logs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669222 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669282 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669304 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrtq6\" (UniqueName: \"kubernetes.io/projected/3308f215-a7a3-4810-bc0e-a6556edadf05-kube-api-access-qrtq6\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669327 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669383 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-config-data\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669433 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-config-data-custom\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.669456 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-scripts\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771022 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3308f215-a7a3-4810-bc0e-a6556edadf05-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771123 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3308f215-a7a3-4810-bc0e-a6556edadf05-logs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771174 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771175 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3308f215-a7a3-4810-bc0e-a6556edadf05-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771193 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771274 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrtq6\" (UniqueName: \"kubernetes.io/projected/3308f215-a7a3-4810-bc0e-a6556edadf05-kube-api-access-qrtq6\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771339 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771380 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-config-data\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771508 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" 
(UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-config-data-custom\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.771561 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-scripts\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.772871 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3308f215-a7a3-4810-bc0e-a6556edadf05-logs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.776042 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.776293 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.778371 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-scripts\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.779260 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-config-data\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.779818 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-config-data-custom\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.780207 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3308f215-a7a3-4810-bc0e-a6556edadf05-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.810900 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrtq6\" (UniqueName: \"kubernetes.io/projected/3308f215-a7a3-4810-bc0e-a6556edadf05-kube-api-access-qrtq6\") pod \"cinder-api-0\" (UID: \"3308f215-a7a3-4810-bc0e-a6556edadf05\") " pod="openstack/cinder-api-0" Nov 26 17:05:48 crc kubenswrapper[5010]: I1126 17:05:48.892081 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 26 17:05:49 crc kubenswrapper[5010]: I1126 17:05:49.342052 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 26 17:05:49 crc kubenswrapper[5010]: W1126 17:05:49.348990 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3308f215_a7a3_4810_bc0e_a6556edadf05.slice/crio-8872d6b5e61da0ddef4d0e18fb7385d48b5eee5ac00b1e5f1a183a0f6ba335e7 WatchSource:0}: Error finding container 8872d6b5e61da0ddef4d0e18fb7385d48b5eee5ac00b1e5f1a183a0f6ba335e7: Status 404 returned error can't find the container with id 8872d6b5e61da0ddef4d0e18fb7385d48b5eee5ac00b1e5f1a183a0f6ba335e7 Nov 26 17:05:49 crc kubenswrapper[5010]: I1126 17:05:49.419691 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3308f215-a7a3-4810-bc0e-a6556edadf05","Type":"ContainerStarted","Data":"8872d6b5e61da0ddef4d0e18fb7385d48b5eee5ac00b1e5f1a183a0f6ba335e7"} Nov 26 17:05:49 crc kubenswrapper[5010]: I1126 17:05:49.910357 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="427b755e-a242-4666-9450-58a48dfec489" path="/var/lib/kubelet/pods/427b755e-a242-4666-9450-58a48dfec489/volumes" Nov 26 17:05:50 crc kubenswrapper[5010]: I1126 17:05:50.432768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3308f215-a7a3-4810-bc0e-a6556edadf05","Type":"ContainerStarted","Data":"a8a194eca14841b138526d6c9306084dc1e7fc682d7d7ccbe36f21d1deb3eb60"} Nov 26 17:05:51 crc kubenswrapper[5010]: I1126 17:05:51.447208 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3308f215-a7a3-4810-bc0e-a6556edadf05","Type":"ContainerStarted","Data":"ecf0b5b19bf10d96875992f2ed22e1b778b8a5500b483ec932a7ff4d97fc706c"} Nov 26 17:05:51 crc kubenswrapper[5010]: I1126 17:05:51.447802 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 26 17:05:51 crc kubenswrapper[5010]: I1126 17:05:51.471427 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.471410371 podStartE2EDuration="3.471410371s" podCreationTimestamp="2025-11-26 17:05:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:05:51.469472513 +0000 UTC m=+5972.260189741" watchObservedRunningTime="2025-11-26 17:05:51.471410371 +0000 UTC m=+5972.262127519" Nov 26 17:05:51 crc kubenswrapper[5010]: I1126 17:05:51.954688 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:52 crc kubenswrapper[5010]: I1126 17:05:52.008992 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:52 crc kubenswrapper[5010]: I1126 17:05:52.198497 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rvhnl"] Nov 26 17:05:53 crc kubenswrapper[5010]: I1126 17:05:53.293073 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 17:05:53 crc kubenswrapper[5010]: I1126 17:05:53.355968 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:53 crc kubenswrapper[5010]: I1126 17:05:53.466239 5010 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="cinder-scheduler" containerID="cri-o://f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84" gracePeriod=30 Nov 26 17:05:53 crc kubenswrapper[5010]: I1126 17:05:53.466302 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="probe" containerID="cri-o://6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af" gracePeriod=30 Nov 26 17:05:53 crc kubenswrapper[5010]: I1126 17:05:53.466337 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rvhnl" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="registry-server" containerID="cri-o://7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e" gracePeriod=2 Nov 26 17:05:53 crc kubenswrapper[5010]: I1126 17:05:53.917120 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.088898 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-catalog-content\") pod \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.089232 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-utilities\") pod \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.089343 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpsgx\" (UniqueName: \"kubernetes.io/projected/04531e6c-a168-4a3f-acc5-43e1411f2ed1-kube-api-access-kpsgx\") pod \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\" (UID: \"04531e6c-a168-4a3f-acc5-43e1411f2ed1\") " Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.090726 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-utilities" (OuterVolumeSpecName: "utilities") pod "04531e6c-a168-4a3f-acc5-43e1411f2ed1" (UID: "04531e6c-a168-4a3f-acc5-43e1411f2ed1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.094970 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04531e6c-a168-4a3f-acc5-43e1411f2ed1-kube-api-access-kpsgx" (OuterVolumeSpecName: "kube-api-access-kpsgx") pod "04531e6c-a168-4a3f-acc5-43e1411f2ed1" (UID: "04531e6c-a168-4a3f-acc5-43e1411f2ed1"). InnerVolumeSpecName "kube-api-access-kpsgx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.192043 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.192072 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpsgx\" (UniqueName: \"kubernetes.io/projected/04531e6c-a168-4a3f-acc5-43e1411f2ed1-kube-api-access-kpsgx\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.195641 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "04531e6c-a168-4a3f-acc5-43e1411f2ed1" (UID: "04531e6c-a168-4a3f-acc5-43e1411f2ed1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.294416 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/04531e6c-a168-4a3f-acc5-43e1411f2ed1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.481437 5010 generic.go:334] "Generic (PLEG): container finished" podID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerID="6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af" exitCode=0 Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.481506 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5c7ca36b-3fe0-420b-b7fe-8420a1544075","Type":"ContainerDied","Data":"6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af"} Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.485087 5010 generic.go:334] "Generic (PLEG): container finished" podID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerID="7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e" exitCode=0 Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.485322 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerDied","Data":"7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e"} Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.485565 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rvhnl" event={"ID":"04531e6c-a168-4a3f-acc5-43e1411f2ed1","Type":"ContainerDied","Data":"0ecef578a3fb7f4740aed53561382061bd9f7f6db3ccc09ed02029db7b4f94d5"} Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.485386 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rvhnl" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.485631 5010 scope.go:117] "RemoveContainer" containerID="7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.512459 5010 scope.go:117] "RemoveContainer" containerID="a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.535603 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rvhnl"] Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.562470 5010 scope.go:117] "RemoveContainer" containerID="18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.566805 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rvhnl"] Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.585772 5010 scope.go:117] "RemoveContainer" containerID="7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e" Nov 26 17:05:54 crc kubenswrapper[5010]: E1126 17:05:54.586511 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e\": container with ID starting with 7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e not found: ID does not exist" containerID="7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.586582 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e"} err="failed to get container status \"7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e\": rpc error: code = NotFound desc = could not find container \"7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e\": container with ID starting with 7f3a6d821191747b26ae23f009dbb00ef4eedcf6bca2743516cd9ad43462f64e not found: ID does not exist" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.586625 5010 scope.go:117] "RemoveContainer" containerID="a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1" Nov 26 17:05:54 crc kubenswrapper[5010]: E1126 17:05:54.587063 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1\": container with ID starting with a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1 not found: ID does not exist" containerID="a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.587225 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1"} err="failed to get container status \"a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1\": rpc error: code = NotFound desc = could not find container \"a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1\": container with ID starting with a78954f4942eeeef813382e7106f7862d4fbe517d313327e789c429a917ea2f1 not found: ID does not exist" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.587333 5010 scope.go:117] "RemoveContainer" 
containerID="18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b" Nov 26 17:05:54 crc kubenswrapper[5010]: E1126 17:05:54.587772 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b\": container with ID starting with 18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b not found: ID does not exist" containerID="18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b" Nov 26 17:05:54 crc kubenswrapper[5010]: I1126 17:05:54.587879 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b"} err="failed to get container status \"18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b\": rpc error: code = NotFound desc = could not find container \"18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b\": container with ID starting with 18a3879e8e1ab7fc31aec24eeb17231b2ef40634dcbe0cf5a8edc5d07427d25b not found: ID does not exist" Nov 26 17:05:55 crc kubenswrapper[5010]: I1126 17:05:55.904270 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" path="/var/lib/kubelet/pods/04531e6c-a168-4a3f-acc5-43e1411f2ed1/volumes" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.050639 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.235799 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-scripts\") pod \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.235854 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-combined-ca-bundle\") pod \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.235969 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data\") pod \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.235996 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c7ca36b-3fe0-420b-b7fe-8420a1544075-etc-machine-id\") pod \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.236037 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data-custom\") pod \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.236057 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r72gc\" (UniqueName: 
\"kubernetes.io/projected/5c7ca36b-3fe0-420b-b7fe-8420a1544075-kube-api-access-r72gc\") pod \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\" (UID: \"5c7ca36b-3fe0-420b-b7fe-8420a1544075\") " Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.236154 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c7ca36b-3fe0-420b-b7fe-8420a1544075-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "5c7ca36b-3fe0-420b-b7fe-8420a1544075" (UID: "5c7ca36b-3fe0-420b-b7fe-8420a1544075"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.236386 5010 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5c7ca36b-3fe0-420b-b7fe-8420a1544075-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.242021 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "5c7ca36b-3fe0-420b-b7fe-8420a1544075" (UID: "5c7ca36b-3fe0-420b-b7fe-8420a1544075"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.243306 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-scripts" (OuterVolumeSpecName: "scripts") pod "5c7ca36b-3fe0-420b-b7fe-8420a1544075" (UID: "5c7ca36b-3fe0-420b-b7fe-8420a1544075"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.253059 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c7ca36b-3fe0-420b-b7fe-8420a1544075-kube-api-access-r72gc" (OuterVolumeSpecName: "kube-api-access-r72gc") pod "5c7ca36b-3fe0-420b-b7fe-8420a1544075" (UID: "5c7ca36b-3fe0-420b-b7fe-8420a1544075"). InnerVolumeSpecName "kube-api-access-r72gc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.292368 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c7ca36b-3fe0-420b-b7fe-8420a1544075" (UID: "5c7ca36b-3fe0-420b-b7fe-8420a1544075"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.337674 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.337747 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r72gc\" (UniqueName: \"kubernetes.io/projected/5c7ca36b-3fe0-420b-b7fe-8420a1544075-kube-api-access-r72gc\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.337762 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.337771 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.351627 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data" (OuterVolumeSpecName: "config-data") pod "5c7ca36b-3fe0-420b-b7fe-8420a1544075" (UID: "5c7ca36b-3fe0-420b-b7fe-8420a1544075"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.439395 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c7ca36b-3fe0-420b-b7fe-8420a1544075-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.510077 5010 generic.go:334] "Generic (PLEG): container finished" podID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerID="f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84" exitCode=0 Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.510137 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5c7ca36b-3fe0-420b-b7fe-8420a1544075","Type":"ContainerDied","Data":"f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84"} Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.510170 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.510196 5010 scope.go:117] "RemoveContainer" containerID="6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.510180 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5c7ca36b-3fe0-420b-b7fe-8420a1544075","Type":"ContainerDied","Data":"4bef4f29845cc5c24b410be4fd879d3d060d1873ae4d1df15de1a34c176176d2"} Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.552638 5010 scope.go:117] "RemoveContainer" containerID="f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.557669 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.588653 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.595261 5010 scope.go:117] "RemoveContainer" containerID="6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af" Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.595673 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af\": container with ID starting with 6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af not found: ID does not exist" containerID="6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.595780 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af"} err="failed to get container status \"6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af\": rpc error: code = NotFound desc = could not find container \"6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af\": container with ID starting with 6ca5680ba4e48cefdfc71c35b3f213a37fa12568307827d1fb6aff2c7254d3af not found: ID does not exist" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.595813 5010 scope.go:117] "RemoveContainer" containerID="f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84" Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.596072 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84\": container with ID starting with f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84 not found: ID does not exist" containerID="f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.596099 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84"} err="failed to get container status \"f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84\": rpc error: code = NotFound desc = could not find container \"f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84\": container with ID starting with f5258355e8fb4c4e9f5006751d19503e20881dca4cee252910a819dabca23f84 not found: ID does not exist" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 
17:05:56.604007 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.604406 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="registry-server" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604427 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="registry-server" Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.604458 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="extract-content" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604466 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="extract-content" Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.604482 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="extract-utilities" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604489 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="extract-utilities" Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.604499 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="probe" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604506 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="probe" Nov 26 17:05:56 crc kubenswrapper[5010]: E1126 17:05:56.604537 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="cinder-scheduler" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604543 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="cinder-scheduler" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604693 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="cinder-scheduler" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604707 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="04531e6c-a168-4a3f-acc5-43e1411f2ed1" containerName="registry-server" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.604736 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" containerName="probe" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.605800 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.613146 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.617616 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.747927 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7mnr\" (UniqueName: \"kubernetes.io/projected/73c74793-8ce0-4b8c-92f9-5e01c0462723-kube-api-access-q7mnr\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.748003 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-scripts\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.748034 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.748061 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-config-data\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.748150 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/73c74793-8ce0-4b8c-92f9-5e01c0462723-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.748188 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852131 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/73c74793-8ce0-4b8c-92f9-5e01c0462723-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852193 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/73c74793-8ce0-4b8c-92f9-5e01c0462723-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852236 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7mnr\" (UniqueName: \"kubernetes.io/projected/73c74793-8ce0-4b8c-92f9-5e01c0462723-kube-api-access-q7mnr\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852880 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-scripts\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.852942 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-config-data\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.856759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-scripts\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.857527 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.861887 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-config-data\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.862443 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73c74793-8ce0-4b8c-92f9-5e01c0462723-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.867845 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7mnr\" (UniqueName: \"kubernetes.io/projected/73c74793-8ce0-4b8c-92f9-5e01c0462723-kube-api-access-q7mnr\") pod \"cinder-scheduler-0\" (UID: \"73c74793-8ce0-4b8c-92f9-5e01c0462723\") " pod="openstack/cinder-scheduler-0" 
Nov 26 17:05:56 crc kubenswrapper[5010]: I1126 17:05:56.930533 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 26 17:05:57 crc kubenswrapper[5010]: I1126 17:05:57.155054 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 26 17:05:57 crc kubenswrapper[5010]: I1126 17:05:57.528866 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"73c74793-8ce0-4b8c-92f9-5e01c0462723","Type":"ContainerStarted","Data":"496d58bb5f594bc40da9fb88f097e32935e4d1732da0eee62adb0afbf0f86938"} Nov 26 17:05:57 crc kubenswrapper[5010]: I1126 17:05:57.903302 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c7ca36b-3fe0-420b-b7fe-8420a1544075" path="/var/lib/kubelet/pods/5c7ca36b-3fe0-420b-b7fe-8420a1544075/volumes" Nov 26 17:05:58 crc kubenswrapper[5010]: I1126 17:05:58.563104 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"73c74793-8ce0-4b8c-92f9-5e01c0462723","Type":"ContainerStarted","Data":"aed6ad9c2428a6f239bb88760c934f01df43a9bd9d6903d48ab700ac4386fa65"} Nov 26 17:05:58 crc kubenswrapper[5010]: I1126 17:05:58.563838 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"73c74793-8ce0-4b8c-92f9-5e01c0462723","Type":"ContainerStarted","Data":"c37b1caa8ece7a9e708cc4fd6a573da3382724c8145b5ed0e5b0eb9a43f4ccae"} Nov 26 17:05:58 crc kubenswrapper[5010]: I1126 17:05:58.592377 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.592351526 podStartE2EDuration="2.592351526s" podCreationTimestamp="2025-11-26 17:05:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:05:58.585589548 +0000 UTC m=+5979.376306706" watchObservedRunningTime="2025-11-26 17:05:58.592351526 +0000 UTC m=+5979.383068664" Nov 26 17:06:00 crc kubenswrapper[5010]: I1126 17:06:00.885453 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 26 17:06:01 crc kubenswrapper[5010]: I1126 17:06:01.931142 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 26 17:06:06 crc kubenswrapper[5010]: I1126 17:06:06.103101 5010 scope.go:117] "RemoveContainer" containerID="6974d7e0aa3191782462b813b3df96d59ca21913ae72c1f586e286dcf61c1e32" Nov 26 17:06:06 crc kubenswrapper[5010]: I1126 17:06:06.130600 5010 scope.go:117] "RemoveContainer" containerID="3d5b121e410410910ea015003d98ef0fc4fdd64704ac34b8aa43f9194347fb52" Nov 26 17:06:06 crc kubenswrapper[5010]: I1126 17:06:06.186575 5010 scope.go:117] "RemoveContainer" containerID="fa9c10c7470155c8b44c47d08fcc86bbbec9230780d9a4938a8279298f88d521" Nov 26 17:06:07 crc kubenswrapper[5010]: I1126 17:06:07.138250 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.671141 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-7krs5"] Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.673651 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.694131 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-7krs5"] Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.736456 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1919efcb-b975-4346-83d6-5a62ecb38f8e-operator-scripts\") pod \"glance-db-create-7krs5\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.736507 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwc8m\" (UniqueName: \"kubernetes.io/projected/1919efcb-b975-4346-83d6-5a62ecb38f8e-kube-api-access-bwc8m\") pod \"glance-db-create-7krs5\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.786772 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-3cbe-account-create-update-2z9p8"] Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.788742 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.794529 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.805487 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-3cbe-account-create-update-2z9p8"] Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.838268 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1919efcb-b975-4346-83d6-5a62ecb38f8e-operator-scripts\") pod \"glance-db-create-7krs5\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.838513 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwc8m\" (UniqueName: \"kubernetes.io/projected/1919efcb-b975-4346-83d6-5a62ecb38f8e-kube-api-access-bwc8m\") pod \"glance-db-create-7krs5\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.839399 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1919efcb-b975-4346-83d6-5a62ecb38f8e-operator-scripts\") pod \"glance-db-create-7krs5\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.860014 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwc8m\" (UniqueName: \"kubernetes.io/projected/1919efcb-b975-4346-83d6-5a62ecb38f8e-kube-api-access-bwc8m\") pod \"glance-db-create-7krs5\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " pod="openstack/glance-db-create-7krs5" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.940561 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n7rw\" (UniqueName: \"kubernetes.io/projected/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-kube-api-access-8n7rw\") 
pod \"glance-3cbe-account-create-update-2z9p8\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:09 crc kubenswrapper[5010]: I1126 17:06:09.940697 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-operator-scripts\") pod \"glance-3cbe-account-create-update-2z9p8\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.042089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-operator-scripts\") pod \"glance-3cbe-account-create-update-2z9p8\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.042232 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n7rw\" (UniqueName: \"kubernetes.io/projected/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-kube-api-access-8n7rw\") pod \"glance-3cbe-account-create-update-2z9p8\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.043991 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-operator-scripts\") pod \"glance-3cbe-account-create-update-2z9p8\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.045059 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-7krs5" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.067520 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n7rw\" (UniqueName: \"kubernetes.io/projected/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-kube-api-access-8n7rw\") pod \"glance-3cbe-account-create-update-2z9p8\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.110206 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.564586 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-7krs5"] Nov 26 17:06:10 crc kubenswrapper[5010]: W1126 17:06:10.572858 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1919efcb_b975_4346_83d6_5a62ecb38f8e.slice/crio-a80d2cbc96ead3890092c33faef63d509c70b17ea9cd39fd50669ac2c178070c WatchSource:0}: Error finding container a80d2cbc96ead3890092c33faef63d509c70b17ea9cd39fd50669ac2c178070c: Status 404 returned error can't find the container with id a80d2cbc96ead3890092c33faef63d509c70b17ea9cd39fd50669ac2c178070c Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.631630 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-3cbe-account-create-update-2z9p8"] Nov 26 17:06:10 crc kubenswrapper[5010]: W1126 17:06:10.636259 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dd9f4f8_42b0_49e7_a195_ccb1f26ca7ea.slice/crio-1b2fd2c7f1d20793bd083b102dcdaae7727495edce2ac3f1424aacc37ccc07a1 WatchSource:0}: Error finding container 1b2fd2c7f1d20793bd083b102dcdaae7727495edce2ac3f1424aacc37ccc07a1: Status 404 returned error can't find the container with id 1b2fd2c7f1d20793bd083b102dcdaae7727495edce2ac3f1424aacc37ccc07a1 Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.689544 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-7krs5" event={"ID":"1919efcb-b975-4346-83d6-5a62ecb38f8e","Type":"ContainerStarted","Data":"a80d2cbc96ead3890092c33faef63d509c70b17ea9cd39fd50669ac2c178070c"} Nov 26 17:06:10 crc kubenswrapper[5010]: I1126 17:06:10.692982 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3cbe-account-create-update-2z9p8" event={"ID":"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea","Type":"ContainerStarted","Data":"1b2fd2c7f1d20793bd083b102dcdaae7727495edce2ac3f1424aacc37ccc07a1"} Nov 26 17:06:11 crc kubenswrapper[5010]: I1126 17:06:11.705049 5010 generic.go:334] "Generic (PLEG): container finished" podID="1919efcb-b975-4346-83d6-5a62ecb38f8e" containerID="bed95050dadf9ae64c267169121063109fb44e09a4436bbab27599cab5316739" exitCode=0 Nov 26 17:06:11 crc kubenswrapper[5010]: I1126 17:06:11.705172 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-7krs5" event={"ID":"1919efcb-b975-4346-83d6-5a62ecb38f8e","Type":"ContainerDied","Data":"bed95050dadf9ae64c267169121063109fb44e09a4436bbab27599cab5316739"} Nov 26 17:06:11 crc kubenswrapper[5010]: I1126 17:06:11.708143 5010 generic.go:334] "Generic (PLEG): container finished" podID="2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" containerID="ec802261bbbd66221ad53688b9a2c9db41a84d77432395a0b6a926df6d41dbde" exitCode=0 Nov 26 17:06:11 crc kubenswrapper[5010]: I1126 17:06:11.708278 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3cbe-account-create-update-2z9p8" event={"ID":"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea","Type":"ContainerDied","Data":"ec802261bbbd66221ad53688b9a2c9db41a84d77432395a0b6a926df6d41dbde"} Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.144573 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-7krs5" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.150417 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.308196 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwc8m\" (UniqueName: \"kubernetes.io/projected/1919efcb-b975-4346-83d6-5a62ecb38f8e-kube-api-access-bwc8m\") pod \"1919efcb-b975-4346-83d6-5a62ecb38f8e\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.308302 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1919efcb-b975-4346-83d6-5a62ecb38f8e-operator-scripts\") pod \"1919efcb-b975-4346-83d6-5a62ecb38f8e\" (UID: \"1919efcb-b975-4346-83d6-5a62ecb38f8e\") " Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.308476 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8n7rw\" (UniqueName: \"kubernetes.io/projected/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-kube-api-access-8n7rw\") pod \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.308539 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-operator-scripts\") pod \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\" (UID: \"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea\") " Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.309449 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1919efcb-b975-4346-83d6-5a62ecb38f8e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1919efcb-b975-4346-83d6-5a62ecb38f8e" (UID: "1919efcb-b975-4346-83d6-5a62ecb38f8e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.309458 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" (UID: "2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.316321 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1919efcb-b975-4346-83d6-5a62ecb38f8e-kube-api-access-bwc8m" (OuterVolumeSpecName: "kube-api-access-bwc8m") pod "1919efcb-b975-4346-83d6-5a62ecb38f8e" (UID: "1919efcb-b975-4346-83d6-5a62ecb38f8e"). InnerVolumeSpecName "kube-api-access-bwc8m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.317407 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-kube-api-access-8n7rw" (OuterVolumeSpecName: "kube-api-access-8n7rw") pod "2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" (UID: "2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea"). InnerVolumeSpecName "kube-api-access-8n7rw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.411177 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwc8m\" (UniqueName: \"kubernetes.io/projected/1919efcb-b975-4346-83d6-5a62ecb38f8e-kube-api-access-bwc8m\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.411319 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1919efcb-b975-4346-83d6-5a62ecb38f8e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.411351 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8n7rw\" (UniqueName: \"kubernetes.io/projected/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-kube-api-access-8n7rw\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.411404 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.741013 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-7krs5" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.741323 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-7krs5" event={"ID":"1919efcb-b975-4346-83d6-5a62ecb38f8e","Type":"ContainerDied","Data":"a80d2cbc96ead3890092c33faef63d509c70b17ea9cd39fd50669ac2c178070c"} Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.741503 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a80d2cbc96ead3890092c33faef63d509c70b17ea9cd39fd50669ac2c178070c" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.746884 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-3cbe-account-create-update-2z9p8" event={"ID":"2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea","Type":"ContainerDied","Data":"1b2fd2c7f1d20793bd083b102dcdaae7727495edce2ac3f1424aacc37ccc07a1"} Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.747309 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b2fd2c7f1d20793bd083b102dcdaae7727495edce2ac3f1424aacc37ccc07a1" Nov 26 17:06:13 crc kubenswrapper[5010]: I1126 17:06:13.746922 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-3cbe-account-create-update-2z9p8" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.100770 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-mgrwf"] Nov 26 17:06:15 crc kubenswrapper[5010]: E1126 17:06:15.101471 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1919efcb-b975-4346-83d6-5a62ecb38f8e" containerName="mariadb-database-create" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.101482 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1919efcb-b975-4346-83d6-5a62ecb38f8e" containerName="mariadb-database-create" Nov 26 17:06:15 crc kubenswrapper[5010]: E1126 17:06:15.101503 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" containerName="mariadb-account-create-update" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.101509 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" containerName="mariadb-account-create-update" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.101671 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1919efcb-b975-4346-83d6-5a62ecb38f8e" containerName="mariadb-database-create" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.101686 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" containerName="mariadb-account-create-update" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.102284 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.104208 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.104241 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-7hf2b" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.116776 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-mgrwf"] Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.148328 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-db-sync-config-data\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.148367 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cck72\" (UniqueName: \"kubernetes.io/projected/01e05e22-ea98-435b-a24a-1d46bd501bff-kube-api-access-cck72\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.148436 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-combined-ca-bundle\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.148477 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-config-data\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.249737 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-combined-ca-bundle\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.249807 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-config-data\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.249896 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-db-sync-config-data\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.249916 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cck72\" (UniqueName: \"kubernetes.io/projected/01e05e22-ea98-435b-a24a-1d46bd501bff-kube-api-access-cck72\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.255429 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-db-sync-config-data\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.255725 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-config-data\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.255739 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-combined-ca-bundle\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.270816 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cck72\" (UniqueName: \"kubernetes.io/projected/01e05e22-ea98-435b-a24a-1d46bd501bff-kube-api-access-cck72\") pod \"glance-db-sync-mgrwf\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:15 crc kubenswrapper[5010]: I1126 17:06:15.432181 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:16 crc kubenswrapper[5010]: I1126 17:06:16.012638 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-mgrwf"] Nov 26 17:06:16 crc kubenswrapper[5010]: W1126 17:06:16.022609 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod01e05e22_ea98_435b_a24a_1d46bd501bff.slice/crio-e33cc22334a8d03401bca9a4491e77306c1bcb940aca9ac293f6c237611c0723 WatchSource:0}: Error finding container e33cc22334a8d03401bca9a4491e77306c1bcb940aca9ac293f6c237611c0723: Status 404 returned error can't find the container with id e33cc22334a8d03401bca9a4491e77306c1bcb940aca9ac293f6c237611c0723 Nov 26 17:06:16 crc kubenswrapper[5010]: I1126 17:06:16.779187 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgrwf" event={"ID":"01e05e22-ea98-435b-a24a-1d46bd501bff","Type":"ContainerStarted","Data":"0b7f78224bef14c31f743c77d8b62088f208baf2293fa76358e3da3d4bd5b63b"} Nov 26 17:06:16 crc kubenswrapper[5010]: I1126 17:06:16.779637 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgrwf" event={"ID":"01e05e22-ea98-435b-a24a-1d46bd501bff","Type":"ContainerStarted","Data":"e33cc22334a8d03401bca9a4491e77306c1bcb940aca9ac293f6c237611c0723"} Nov 26 17:06:16 crc kubenswrapper[5010]: I1126 17:06:16.803588 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-mgrwf" podStartSLOduration=1.803568853 podStartE2EDuration="1.803568853s" podCreationTimestamp="2025-11-26 17:06:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:16.795661317 +0000 UTC m=+5997.586378465" watchObservedRunningTime="2025-11-26 17:06:16.803568853 +0000 UTC m=+5997.594286001" Nov 26 17:06:19 crc kubenswrapper[5010]: I1126 17:06:19.817351 5010 generic.go:334] "Generic (PLEG): container finished" podID="01e05e22-ea98-435b-a24a-1d46bd501bff" containerID="0b7f78224bef14c31f743c77d8b62088f208baf2293fa76358e3da3d4bd5b63b" exitCode=0 Nov 26 17:06:19 crc kubenswrapper[5010]: I1126 17:06:19.817455 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgrwf" event={"ID":"01e05e22-ea98-435b-a24a-1d46bd501bff","Type":"ContainerDied","Data":"0b7f78224bef14c31f743c77d8b62088f208baf2293fa76358e3da3d4bd5b63b"} Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.335550 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.472883 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-combined-ca-bundle\") pod \"01e05e22-ea98-435b-a24a-1d46bd501bff\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.473337 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cck72\" (UniqueName: \"kubernetes.io/projected/01e05e22-ea98-435b-a24a-1d46bd501bff-kube-api-access-cck72\") pod \"01e05e22-ea98-435b-a24a-1d46bd501bff\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.474148 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-db-sync-config-data\") pod \"01e05e22-ea98-435b-a24a-1d46bd501bff\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.474453 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-config-data\") pod \"01e05e22-ea98-435b-a24a-1d46bd501bff\" (UID: \"01e05e22-ea98-435b-a24a-1d46bd501bff\") " Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.479521 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01e05e22-ea98-435b-a24a-1d46bd501bff-kube-api-access-cck72" (OuterVolumeSpecName: "kube-api-access-cck72") pod "01e05e22-ea98-435b-a24a-1d46bd501bff" (UID: "01e05e22-ea98-435b-a24a-1d46bd501bff"). InnerVolumeSpecName "kube-api-access-cck72". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.488908 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "01e05e22-ea98-435b-a24a-1d46bd501bff" (UID: "01e05e22-ea98-435b-a24a-1d46bd501bff"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.520641 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "01e05e22-ea98-435b-a24a-1d46bd501bff" (UID: "01e05e22-ea98-435b-a24a-1d46bd501bff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.548550 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-config-data" (OuterVolumeSpecName: "config-data") pod "01e05e22-ea98-435b-a24a-1d46bd501bff" (UID: "01e05e22-ea98-435b-a24a-1d46bd501bff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.578754 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.578843 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cck72\" (UniqueName: \"kubernetes.io/projected/01e05e22-ea98-435b-a24a-1d46bd501bff-kube-api-access-cck72\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.578869 5010 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.578893 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01e05e22-ea98-435b-a24a-1d46bd501bff-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.838897 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-mgrwf" event={"ID":"01e05e22-ea98-435b-a24a-1d46bd501bff","Type":"ContainerDied","Data":"e33cc22334a8d03401bca9a4491e77306c1bcb940aca9ac293f6c237611c0723"} Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.838979 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e33cc22334a8d03401bca9a4491e77306c1bcb940aca9ac293f6c237611c0723" Nov 26 17:06:21 crc kubenswrapper[5010]: I1126 17:06:21.839439 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-mgrwf" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.339125 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:22 crc kubenswrapper[5010]: E1126 17:06:22.339956 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01e05e22-ea98-435b-a24a-1d46bd501bff" containerName="glance-db-sync" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.339976 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="01e05e22-ea98-435b-a24a-1d46bd501bff" containerName="glance-db-sync" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.340217 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="01e05e22-ea98-435b-a24a-1d46bd501bff" containerName="glance-db-sync" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.341403 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.351194 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6988c6b67f-4dbcn"] Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.353355 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.355250 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.355490 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-7hf2b" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.356306 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.392839 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.417993 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6988c6b67f-4dbcn"] Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.458489 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.460461 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.462596 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.466545 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495615 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-dns-svc\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495666 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzqdt\" (UniqueName: \"kubernetes.io/projected/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-kube-api-access-rzqdt\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495744 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-sb\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495781 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-config-data\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495805 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-logs\") pod \"glance-default-external-api-0\" (UID: 
\"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495830 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-nb\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495863 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495885 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-config\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495923 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-scripts\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.495980 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.496004 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w89m2\" (UniqueName: \"kubernetes.io/projected/31a50f49-b8ee-4511-8c6b-f31245536f56-kube-api-access-w89m2\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597565 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-dns-svc\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597606 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzqdt\" (UniqueName: \"kubernetes.io/projected/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-kube-api-access-rzqdt\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597653 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-sb\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597676 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-logs\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597697 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-config-data\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597729 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-logs\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597752 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-nb\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597768 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8j9z8\" (UniqueName: \"kubernetes.io/projected/be9e124c-dcb6-4852-a844-d81c832fd2ea-kube-api-access-8j9z8\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597794 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597816 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597839 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-config\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597871 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-scripts\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597894 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597913 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597934 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597965 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.597982 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w89m2\" (UniqueName: \"kubernetes.io/projected/31a50f49-b8ee-4511-8c6b-f31245536f56-kube-api-access-w89m2\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.599650 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-config\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.599677 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-nb\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.599650 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-sb\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.600018 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-dns-svc\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.600143 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-logs\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.600469 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.603553 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.604400 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-config-data\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.607444 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-scripts\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.621174 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzqdt\" (UniqueName: \"kubernetes.io/projected/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-kube-api-access-rzqdt\") pod \"glance-default-external-api-0\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.634955 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w89m2\" (UniqueName: \"kubernetes.io/projected/31a50f49-b8ee-4511-8c6b-f31245536f56-kube-api-access-w89m2\") pod \"dnsmasq-dns-6988c6b67f-4dbcn\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.673502 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.690932 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.699566 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8j9z8\" (UniqueName: \"kubernetes.io/projected/be9e124c-dcb6-4852-a844-d81c832fd2ea-kube-api-access-8j9z8\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.699613 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.699680 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.699696 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.699731 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.699804 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-logs\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.700173 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-logs\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.702351 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.703977 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-scripts\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.705521 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.717260 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-config-data\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.718581 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8j9z8\" (UniqueName: \"kubernetes.io/projected/be9e124c-dcb6-4852-a844-d81c832fd2ea-kube-api-access-8j9z8\") pod \"glance-default-internal-api-0\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:22 crc kubenswrapper[5010]: I1126 17:06:22.783361 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:23 crc kubenswrapper[5010]: W1126 17:06:23.343348 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e7a6080_d814_4ec1_b2c9_7c9568cccde8.slice/crio-3942a15d56ef142325fbac0778d66e8b1d9fd00781b812abed31e01cde1256fe WatchSource:0}: Error finding container 3942a15d56ef142325fbac0778d66e8b1d9fd00781b812abed31e01cde1256fe: Status 404 returned error can't find the container with id 3942a15d56ef142325fbac0778d66e8b1d9fd00781b812abed31e01cde1256fe Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.348996 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.396678 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6988c6b67f-4dbcn"] Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.438658 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:23 crc kubenswrapper[5010]: W1126 17:06:23.450177 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe9e124c_dcb6_4852_a844_d81c832fd2ea.slice/crio-503ec6efd4f337ccec095ac79c93c19e09d025e81b7146557ece250aacd7bb2b WatchSource:0}: Error finding container 503ec6efd4f337ccec095ac79c93c19e09d025e81b7146557ece250aacd7bb2b: Status 404 returned error can't find the container with id 503ec6efd4f337ccec095ac79c93c19e09d025e81b7146557ece250aacd7bb2b Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.661621 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.883177 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9e7a6080-d814-4ec1-b2c9-7c9568cccde8","Type":"ContainerStarted","Data":"3942a15d56ef142325fbac0778d66e8b1d9fd00781b812abed31e01cde1256fe"} Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.884400 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"be9e124c-dcb6-4852-a844-d81c832fd2ea","Type":"ContainerStarted","Data":"503ec6efd4f337ccec095ac79c93c19e09d025e81b7146557ece250aacd7bb2b"} Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.886080 5010 generic.go:334] "Generic (PLEG): container finished" podID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerID="6d02c1b8b8b6d59d6382bdbb555dd8231a100cceca36c48cc3a28db1864c3393" exitCode=0 Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.886116 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" event={"ID":"31a50f49-b8ee-4511-8c6b-f31245536f56","Type":"ContainerDied","Data":"6d02c1b8b8b6d59d6382bdbb555dd8231a100cceca36c48cc3a28db1864c3393"} Nov 26 17:06:23 crc kubenswrapper[5010]: I1126 17:06:23.886136 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" event={"ID":"31a50f49-b8ee-4511-8c6b-f31245536f56","Type":"ContainerStarted","Data":"bbaee86b2326b8469f0bf7cc7cf3d0af688167a448655539e4574c1c92ee0160"} Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.571283 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.898142 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9e7a6080-d814-4ec1-b2c9-7c9568cccde8","Type":"ContainerStarted","Data":"f052c762b645efb8a6e9d715c9a1a01d323512ab59e01b5dce3c81f11628e1f7"} Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.898427 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9e7a6080-d814-4ec1-b2c9-7c9568cccde8","Type":"ContainerStarted","Data":"5266550e0d6b1035f769f884c554b6303a6c52b7b604f897e5f90253d117719e"} Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.898547 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-log" containerID="cri-o://5266550e0d6b1035f769f884c554b6303a6c52b7b604f897e5f90253d117719e" gracePeriod=30 Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.898735 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-httpd" containerID="cri-o://f052c762b645efb8a6e9d715c9a1a01d323512ab59e01b5dce3c81f11628e1f7" gracePeriod=30 Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.906020 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be9e124c-dcb6-4852-a844-d81c832fd2ea","Type":"ContainerStarted","Data":"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a"} Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.909387 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" event={"ID":"31a50f49-b8ee-4511-8c6b-f31245536f56","Type":"ContainerStarted","Data":"3c98591f5a402d9130156bff6033377e77cf83c2146f490e3d4223841bdf3cfc"} Nov 26 17:06:24 crc kubenswrapper[5010]: I1126 17:06:24.910039 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.527822 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" 
podStartSLOduration=3.52779071 podStartE2EDuration="3.52779071s" podCreationTimestamp="2025-11-26 17:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:25.5149402 +0000 UTC m=+6006.305657348" watchObservedRunningTime="2025-11-26 17:06:25.52779071 +0000 UTC m=+6006.318507858" Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.541938 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" podStartSLOduration=3.541919391 podStartE2EDuration="3.541919391s" podCreationTimestamp="2025-11-26 17:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:25.535801779 +0000 UTC m=+6006.326518947" watchObservedRunningTime="2025-11-26 17:06:25.541919391 +0000 UTC m=+6006.332636539" Nov 26 17:06:25 crc kubenswrapper[5010]: E1126 17:06:25.555501 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9e7a6080_d814_4ec1_b2c9_7c9568cccde8.slice/crio-f052c762b645efb8a6e9d715c9a1a01d323512ab59e01b5dce3c81f11628e1f7.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.921583 5010 generic.go:334] "Generic (PLEG): container finished" podID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerID="f052c762b645efb8a6e9d715c9a1a01d323512ab59e01b5dce3c81f11628e1f7" exitCode=0 Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.921879 5010 generic.go:334] "Generic (PLEG): container finished" podID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerID="5266550e0d6b1035f769f884c554b6303a6c52b7b604f897e5f90253d117719e" exitCode=143 Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.921652 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9e7a6080-d814-4ec1-b2c9-7c9568cccde8","Type":"ContainerDied","Data":"f052c762b645efb8a6e9d715c9a1a01d323512ab59e01b5dce3c81f11628e1f7"} Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.921963 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9e7a6080-d814-4ec1-b2c9-7c9568cccde8","Type":"ContainerDied","Data":"5266550e0d6b1035f769f884c554b6303a6c52b7b604f897e5f90253d117719e"} Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.925605 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be9e124c-dcb6-4852-a844-d81c832fd2ea","Type":"ContainerStarted","Data":"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d"} Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.926120 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-log" containerID="cri-o://288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a" gracePeriod=30 Nov 26 17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.926373 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-httpd" containerID="cri-o://be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d" gracePeriod=30 Nov 26 
17:06:25 crc kubenswrapper[5010]: I1126 17:06:25.957093 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.957075944 podStartE2EDuration="3.957075944s" podCreationTimestamp="2025-11-26 17:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:25.951777962 +0000 UTC m=+6006.742495120" watchObservedRunningTime="2025-11-26 17:06:25.957075944 +0000 UTC m=+6006.747793092" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.114222 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzqdt\" (UniqueName: \"kubernetes.io/projected/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-kube-api-access-rzqdt\") pod \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284216 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-config-data\") pod \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284348 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-scripts\") pod \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284456 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-logs\") pod \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284512 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-httpd-run\") pod \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284534 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-combined-ca-bundle\") pod \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\" (UID: \"9e7a6080-d814-4ec1-b2c9-7c9568cccde8\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284821 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-logs" (OuterVolumeSpecName: "logs") pod "9e7a6080-d814-4ec1-b2c9-7c9568cccde8" (UID: "9e7a6080-d814-4ec1-b2c9-7c9568cccde8"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.284855 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "9e7a6080-d814-4ec1-b2c9-7c9568cccde8" (UID: "9e7a6080-d814-4ec1-b2c9-7c9568cccde8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.285006 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.285023 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.290783 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-scripts" (OuterVolumeSpecName: "scripts") pod "9e7a6080-d814-4ec1-b2c9-7c9568cccde8" (UID: "9e7a6080-d814-4ec1-b2c9-7c9568cccde8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.291123 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-kube-api-access-rzqdt" (OuterVolumeSpecName: "kube-api-access-rzqdt") pod "9e7a6080-d814-4ec1-b2c9-7c9568cccde8" (UID: "9e7a6080-d814-4ec1-b2c9-7c9568cccde8"). InnerVolumeSpecName "kube-api-access-rzqdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.310569 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e7a6080-d814-4ec1-b2c9-7c9568cccde8" (UID: "9e7a6080-d814-4ec1-b2c9-7c9568cccde8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.365181 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-config-data" (OuterVolumeSpecName: "config-data") pod "9e7a6080-d814-4ec1-b2c9-7c9568cccde8" (UID: "9e7a6080-d814-4ec1-b2c9-7c9568cccde8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.386727 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.386763 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzqdt\" (UniqueName: \"kubernetes.io/projected/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-kube-api-access-rzqdt\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.386773 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.386781 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e7a6080-d814-4ec1-b2c9-7c9568cccde8-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.480361 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.588694 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-scripts\") pod \"be9e124c-dcb6-4852-a844-d81c832fd2ea\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.588775 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-httpd-run\") pod \"be9e124c-dcb6-4852-a844-d81c832fd2ea\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.588813 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-combined-ca-bundle\") pod \"be9e124c-dcb6-4852-a844-d81c832fd2ea\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.589215 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-config-data\") pod \"be9e124c-dcb6-4852-a844-d81c832fd2ea\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.589246 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8j9z8\" (UniqueName: \"kubernetes.io/projected/be9e124c-dcb6-4852-a844-d81c832fd2ea-kube-api-access-8j9z8\") pod \"be9e124c-dcb6-4852-a844-d81c832fd2ea\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.589280 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-logs\") pod \"be9e124c-dcb6-4852-a844-d81c832fd2ea\" (UID: \"be9e124c-dcb6-4852-a844-d81c832fd2ea\") " Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.589414 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "be9e124c-dcb6-4852-a844-d81c832fd2ea" (UID: "be9e124c-dcb6-4852-a844-d81c832fd2ea"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.589974 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.590375 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-logs" (OuterVolumeSpecName: "logs") pod "be9e124c-dcb6-4852-a844-d81c832fd2ea" (UID: "be9e124c-dcb6-4852-a844-d81c832fd2ea"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.593414 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be9e124c-dcb6-4852-a844-d81c832fd2ea-kube-api-access-8j9z8" (OuterVolumeSpecName: "kube-api-access-8j9z8") pod "be9e124c-dcb6-4852-a844-d81c832fd2ea" (UID: "be9e124c-dcb6-4852-a844-d81c832fd2ea"). InnerVolumeSpecName "kube-api-access-8j9z8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.593508 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-scripts" (OuterVolumeSpecName: "scripts") pod "be9e124c-dcb6-4852-a844-d81c832fd2ea" (UID: "be9e124c-dcb6-4852-a844-d81c832fd2ea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.621032 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be9e124c-dcb6-4852-a844-d81c832fd2ea" (UID: "be9e124c-dcb6-4852-a844-d81c832fd2ea"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.638799 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-config-data" (OuterVolumeSpecName: "config-data") pod "be9e124c-dcb6-4852-a844-d81c832fd2ea" (UID: "be9e124c-dcb6-4852-a844-d81c832fd2ea"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.691794 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.691827 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8j9z8\" (UniqueName: \"kubernetes.io/projected/be9e124c-dcb6-4852-a844-d81c832fd2ea-kube-api-access-8j9z8\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.691837 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/be9e124c-dcb6-4852-a844-d81c832fd2ea-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.691845 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.691854 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be9e124c-dcb6-4852-a844-d81c832fd2ea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.938434 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9e7a6080-d814-4ec1-b2c9-7c9568cccde8","Type":"ContainerDied","Data":"3942a15d56ef142325fbac0778d66e8b1d9fd00781b812abed31e01cde1256fe"} Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.938448 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.938497 5010 scope.go:117] "RemoveContainer" containerID="f052c762b645efb8a6e9d715c9a1a01d323512ab59e01b5dce3c81f11628e1f7" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.943687 5010 generic.go:334] "Generic (PLEG): container finished" podID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerID="be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d" exitCode=0 Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.943738 5010 generic.go:334] "Generic (PLEG): container finished" podID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerID="288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a" exitCode=143 Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.943754 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be9e124c-dcb6-4852-a844-d81c832fd2ea","Type":"ContainerDied","Data":"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d"} Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.943785 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.943798 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be9e124c-dcb6-4852-a844-d81c832fd2ea","Type":"ContainerDied","Data":"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a"} Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.943814 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"be9e124c-dcb6-4852-a844-d81c832fd2ea","Type":"ContainerDied","Data":"503ec6efd4f337ccec095ac79c93c19e09d025e81b7146557ece250aacd7bb2b"} Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.979460 5010 scope.go:117] "RemoveContainer" containerID="5266550e0d6b1035f769f884c554b6303a6c52b7b604f897e5f90253d117719e" Nov 26 17:06:26 crc kubenswrapper[5010]: I1126 17:06:26.988656 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.009415 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.011612 5010 scope.go:117] "RemoveContainer" containerID="be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.023307 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.033612 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.043105 5010 scope.go:117] "RemoveContainer" containerID="288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.062952 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: E1126 17:06:27.063477 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-httpd" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063504 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-httpd" Nov 26 17:06:27 crc kubenswrapper[5010]: E1126 17:06:27.063529 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-log" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063539 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-log" Nov 26 17:06:27 crc kubenswrapper[5010]: E1126 17:06:27.063553 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-httpd" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063561 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-httpd" Nov 26 17:06:27 crc kubenswrapper[5010]: E1126 17:06:27.063591 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-log" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063600 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-log" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063833 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-log" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063858 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" containerName="glance-httpd" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063884 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-httpd" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.063901 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" containerName="glance-log" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.065123 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.068620 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.068917 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.069056 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.069215 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-7hf2b" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.088990 5010 scope.go:117] "RemoveContainer" containerID="be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d" Nov 26 17:06:27 crc kubenswrapper[5010]: E1126 17:06:27.091698 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d\": container with ID starting with be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d not found: ID does not exist" containerID="be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.091770 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d"} err="failed to get container status \"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d\": rpc error: code = NotFound desc = could not find container \"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d\": container with ID starting with be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d not found: ID does not exist" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.091803 5010 scope.go:117] "RemoveContainer" containerID="288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.096189 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: E1126 17:06:27.096655 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a\": container with ID starting with 288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a not found: ID does not exist" containerID="288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.096695 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a"} err="failed to get container status \"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a\": rpc error: code = NotFound desc = could not find container \"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a\": container with ID starting with 288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a not found: ID does not exist" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.096745 5010 scope.go:117] "RemoveContainer" containerID="be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.103235 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d"} err="failed to get container status \"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d\": rpc error: code = NotFound desc = could not find container \"be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d\": container with ID starting with be68a237a9b242a51ee7a08a889c022822b82c000283d051367b217a28a5182d not found: ID does not exist" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.103291 5010 scope.go:117] "RemoveContainer" containerID="288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.104124 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a"} err="failed to get container status \"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a\": rpc error: code = NotFound desc = could not find container \"288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a\": container with ID starting with 288470c2000cc4f0b45a470afe45f5742ad53a142963a1118af8bec49587061a not found: ID does not exist" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.107808 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.110312 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.112730 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.115048 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.117922 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.203940 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-logs\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204226 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204418 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-config-data\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204530 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204593 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fg789\" (UniqueName: \"kubernetes.io/projected/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-kube-api-access-fg789\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204625 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204657 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204699 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204740 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204788 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204820 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h7k6x\" (UniqueName: \"kubernetes.io/projected/ccadce3b-18c9-4b3a-b06f-4f810ef81554-kube-api-access-h7k6x\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204846 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-scripts\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204896 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-logs\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.204916 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.306795 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-logs\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.306841 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.306890 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-logs\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.306914 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-scripts\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.306991 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-config-data\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307012 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307037 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fg789\" (UniqueName: \"kubernetes.io/projected/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-kube-api-access-fg789\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307064 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307128 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307160 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307201 5010 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307230 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h7k6x\" (UniqueName: \"kubernetes.io/projected/ccadce3b-18c9-4b3a-b06f-4f810ef81554-kube-api-access-h7k6x\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307257 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-scripts\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.307592 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-logs\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.308137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-logs\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.308213 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.308274 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.313268 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.313383 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.313476 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-scripts\") pod \"glance-default-internal-api-0\" 
(UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.313523 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-config-data\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.314227 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-config-data\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.314385 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-scripts\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.315397 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.316246 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.323805 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h7k6x\" (UniqueName: \"kubernetes.io/projected/ccadce3b-18c9-4b3a-b06f-4f810ef81554-kube-api-access-h7k6x\") pod \"glance-default-external-api-0\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.332729 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fg789\" (UniqueName: \"kubernetes.io/projected/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-kube-api-access-fg789\") pod \"glance-default-internal-api-0\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.395587 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.433409 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.903460 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e7a6080-d814-4ec1-b2c9-7c9568cccde8" path="/var/lib/kubelet/pods/9e7a6080-d814-4ec1-b2c9-7c9568cccde8/volumes" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.904606 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be9e124c-dcb6-4852-a844-d81c832fd2ea" path="/var/lib/kubelet/pods/be9e124c-dcb6-4852-a844-d81c832fd2ea/volumes" Nov 26 17:06:27 crc kubenswrapper[5010]: I1126 17:06:27.965618 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:06:27 crc kubenswrapper[5010]: W1126 17:06:27.967884 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podccadce3b_18c9_4b3a_b06f_4f810ef81554.slice/crio-0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c WatchSource:0}: Error finding container 0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c: Status 404 returned error can't find the container with id 0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c Nov 26 17:06:28 crc kubenswrapper[5010]: I1126 17:06:28.062318 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:06:28 crc kubenswrapper[5010]: W1126 17:06:28.066555 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddd6fbf3e_afa8_4d34_965a_8ba491a85e81.slice/crio-edbfd8f9f544d5e29ef14c0fbbb21c805b25797159dcc17cb03395ac3514338d WatchSource:0}: Error finding container edbfd8f9f544d5e29ef14c0fbbb21c805b25797159dcc17cb03395ac3514338d: Status 404 returned error can't find the container with id edbfd8f9f544d5e29ef14c0fbbb21c805b25797159dcc17cb03395ac3514338d Nov 26 17:06:28 crc kubenswrapper[5010]: I1126 17:06:28.963760 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6fbf3e-afa8-4d34-965a-8ba491a85e81","Type":"ContainerStarted","Data":"612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317"} Nov 26 17:06:28 crc kubenswrapper[5010]: I1126 17:06:28.964077 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6fbf3e-afa8-4d34-965a-8ba491a85e81","Type":"ContainerStarted","Data":"edbfd8f9f544d5e29ef14c0fbbb21c805b25797159dcc17cb03395ac3514338d"} Nov 26 17:06:28 crc kubenswrapper[5010]: I1126 17:06:28.966072 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ccadce3b-18c9-4b3a-b06f-4f810ef81554","Type":"ContainerStarted","Data":"8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3"} Nov 26 17:06:28 crc kubenswrapper[5010]: I1126 17:06:28.966112 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ccadce3b-18c9-4b3a-b06f-4f810ef81554","Type":"ContainerStarted","Data":"0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c"} Nov 26 17:06:29 crc kubenswrapper[5010]: I1126 17:06:29.976582 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"dd6fbf3e-afa8-4d34-965a-8ba491a85e81","Type":"ContainerStarted","Data":"be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0"} Nov 26 17:06:29 crc kubenswrapper[5010]: I1126 17:06:29.979647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ccadce3b-18c9-4b3a-b06f-4f810ef81554","Type":"ContainerStarted","Data":"11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec"} Nov 26 17:06:30 crc kubenswrapper[5010]: I1126 17:06:30.005377 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.005353002 podStartE2EDuration="3.005353002s" podCreationTimestamp="2025-11-26 17:06:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:29.996229835 +0000 UTC m=+6010.786946983" watchObservedRunningTime="2025-11-26 17:06:30.005353002 +0000 UTC m=+6010.796070150" Nov 26 17:06:30 crc kubenswrapper[5010]: I1126 17:06:30.027167 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.027141453 podStartE2EDuration="4.027141453s" podCreationTimestamp="2025-11-26 17:06:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:30.014693834 +0000 UTC m=+6010.805410992" watchObservedRunningTime="2025-11-26 17:06:30.027141453 +0000 UTC m=+6010.817858621" Nov 26 17:06:32 crc kubenswrapper[5010]: I1126 17:06:32.693010 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:06:32 crc kubenswrapper[5010]: I1126 17:06:32.781310 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg"] Nov 26 17:06:32 crc kubenswrapper[5010]: I1126 17:06:32.781648 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerName="dnsmasq-dns" containerID="cri-o://81f63b3382fa1f6590b66cd8c7ec7644264a36611d2990f8226e8e1bda34f2e3" gracePeriod=10 Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.013572 5010 generic.go:334] "Generic (PLEG): container finished" podID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerID="81f63b3382fa1f6590b66cd8c7ec7644264a36611d2990f8226e8e1bda34f2e3" exitCode=0 Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.013613 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" event={"ID":"f4e11ddd-5ed3-4e07-bd88-d132154296e0","Type":"ContainerDied","Data":"81f63b3382fa1f6590b66cd8c7ec7644264a36611d2990f8226e8e1bda34f2e3"} Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.380020 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.528649 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-nb\") pod \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.528778 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-config\") pod \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.528850 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-sb\") pod \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.528920 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-dns-svc\") pod \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.528942 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbzvp\" (UniqueName: \"kubernetes.io/projected/f4e11ddd-5ed3-4e07-bd88-d132154296e0-kube-api-access-qbzvp\") pod \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\" (UID: \"f4e11ddd-5ed3-4e07-bd88-d132154296e0\") " Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.544675 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4e11ddd-5ed3-4e07-bd88-d132154296e0-kube-api-access-qbzvp" (OuterVolumeSpecName: "kube-api-access-qbzvp") pod "f4e11ddd-5ed3-4e07-bd88-d132154296e0" (UID: "f4e11ddd-5ed3-4e07-bd88-d132154296e0"). InnerVolumeSpecName "kube-api-access-qbzvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.586990 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f4e11ddd-5ed3-4e07-bd88-d132154296e0" (UID: "f4e11ddd-5ed3-4e07-bd88-d132154296e0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.591283 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-config" (OuterVolumeSpecName: "config") pod "f4e11ddd-5ed3-4e07-bd88-d132154296e0" (UID: "f4e11ddd-5ed3-4e07-bd88-d132154296e0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.591425 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f4e11ddd-5ed3-4e07-bd88-d132154296e0" (UID: "f4e11ddd-5ed3-4e07-bd88-d132154296e0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.597517 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f4e11ddd-5ed3-4e07-bd88-d132154296e0" (UID: "f4e11ddd-5ed3-4e07-bd88-d132154296e0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.631851 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.631888 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.631897 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.631904 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f4e11ddd-5ed3-4e07-bd88-d132154296e0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:33 crc kubenswrapper[5010]: I1126 17:06:33.631914 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbzvp\" (UniqueName: \"kubernetes.io/projected/f4e11ddd-5ed3-4e07-bd88-d132154296e0-kube-api-access-qbzvp\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:34 crc kubenswrapper[5010]: I1126 17:06:34.039412 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" event={"ID":"f4e11ddd-5ed3-4e07-bd88-d132154296e0","Type":"ContainerDied","Data":"8344b13fc745a337c966991a83bc20b683c3b9d6dc412bf81c51a852beb77e33"} Nov 26 17:06:34 crc kubenswrapper[5010]: I1126 17:06:34.039496 5010 scope.go:117] "RemoveContainer" containerID="81f63b3382fa1f6590b66cd8c7ec7644264a36611d2990f8226e8e1bda34f2e3" Nov 26 17:06:34 crc kubenswrapper[5010]: I1126 17:06:34.039548 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg" Nov 26 17:06:34 crc kubenswrapper[5010]: I1126 17:06:34.077900 5010 scope.go:117] "RemoveContainer" containerID="f934f33c7c76791a952a6149276975bb54df481233b95422be96189ca09f351d" Nov 26 17:06:34 crc kubenswrapper[5010]: I1126 17:06:34.089441 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg"] Nov 26 17:06:34 crc kubenswrapper[5010]: I1126 17:06:34.102967 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bfbdbfc4c-cmhwg"] Nov 26 17:06:35 crc kubenswrapper[5010]: I1126 17:06:35.908035 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" path="/var/lib/kubelet/pods/f4e11ddd-5ed3-4e07-bd88-d132154296e0/volumes" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.396323 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.397008 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.433906 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.434395 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.434934 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.479418 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.481573 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:37 crc kubenswrapper[5010]: I1126 17:06:37.523425 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:38 crc kubenswrapper[5010]: I1126 17:06:38.080395 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:38 crc kubenswrapper[5010]: I1126 17:06:38.080491 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 17:06:38 crc kubenswrapper[5010]: I1126 17:06:38.080524 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 17:06:38 crc kubenswrapper[5010]: I1126 17:06:38.080549 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:39 crc kubenswrapper[5010]: I1126 17:06:39.956397 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:40 crc kubenswrapper[5010]: I1126 17:06:40.070057 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 17:06:40 crc kubenswrapper[5010]: I1126 17:06:40.109616 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 17:06:40 crc 
kubenswrapper[5010]: I1126 17:06:40.109616 5010 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 17:06:40 crc kubenswrapper[5010]: I1126 17:06:40.119163 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 17:06:40 crc kubenswrapper[5010]: I1126 17:06:40.163973 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.285900 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-7f87h"] Nov 26 17:06:48 crc kubenswrapper[5010]: E1126 17:06:48.287264 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerName="dnsmasq-dns" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.287282 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerName="dnsmasq-dns" Nov 26 17:06:48 crc kubenswrapper[5010]: E1126 17:06:48.287302 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerName="init" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.287309 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerName="init" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.287539 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4e11ddd-5ed3-4e07-bd88-d132154296e0" containerName="dnsmasq-dns" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.288295 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.295794 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-7f87h"] Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.337438 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70f6ece9-ed80-47e5-9da8-7d958b8da066-operator-scripts\") pod \"placement-db-create-7f87h\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.337667 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69pjk\" (UniqueName: \"kubernetes.io/projected/70f6ece9-ed80-47e5-9da8-7d958b8da066-kube-api-access-69pjk\") pod \"placement-db-create-7f87h\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.347505 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9fe4-account-create-update-t4fxr"] Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.348841 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.355299 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.356191 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9fe4-account-create-update-t4fxr"] Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.439966 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70f6ece9-ed80-47e5-9da8-7d958b8da066-operator-scripts\") pod \"placement-db-create-7f87h\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.440011 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69pjk\" (UniqueName: \"kubernetes.io/projected/70f6ece9-ed80-47e5-9da8-7d958b8da066-kube-api-access-69pjk\") pod \"placement-db-create-7f87h\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.440047 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f975d\" (UniqueName: \"kubernetes.io/projected/37efa8b7-9526-4456-b8fb-3f637f7b03ba-kube-api-access-f975d\") pod \"placement-9fe4-account-create-update-t4fxr\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.440196 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37efa8b7-9526-4456-b8fb-3f637f7b03ba-operator-scripts\") pod \"placement-9fe4-account-create-update-t4fxr\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.441179 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70f6ece9-ed80-47e5-9da8-7d958b8da066-operator-scripts\") pod \"placement-db-create-7f87h\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.460279 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69pjk\" (UniqueName: \"kubernetes.io/projected/70f6ece9-ed80-47e5-9da8-7d958b8da066-kube-api-access-69pjk\") pod \"placement-db-create-7f87h\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.542062 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37efa8b7-9526-4456-b8fb-3f637f7b03ba-operator-scripts\") pod \"placement-9fe4-account-create-update-t4fxr\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.542339 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f975d\" (UniqueName: 
\"kubernetes.io/projected/37efa8b7-9526-4456-b8fb-3f637f7b03ba-kube-api-access-f975d\") pod \"placement-9fe4-account-create-update-t4fxr\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.542939 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37efa8b7-9526-4456-b8fb-3f637f7b03ba-operator-scripts\") pod \"placement-9fe4-account-create-update-t4fxr\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.558161 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f975d\" (UniqueName: \"kubernetes.io/projected/37efa8b7-9526-4456-b8fb-3f637f7b03ba-kube-api-access-f975d\") pod \"placement-9fe4-account-create-update-t4fxr\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.614056 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7f87h" Nov 26 17:06:48 crc kubenswrapper[5010]: I1126 17:06:48.666682 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:49 crc kubenswrapper[5010]: I1126 17:06:49.131121 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-7f87h"] Nov 26 17:06:49 crc kubenswrapper[5010]: W1126 17:06:49.137490 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod70f6ece9_ed80_47e5_9da8_7d958b8da066.slice/crio-d82853a462300f3b8c070607dd3d03e19c7a8da82a17795e217170cafe6b14ee WatchSource:0}: Error finding container d82853a462300f3b8c070607dd3d03e19c7a8da82a17795e217170cafe6b14ee: Status 404 returned error can't find the container with id d82853a462300f3b8c070607dd3d03e19c7a8da82a17795e217170cafe6b14ee Nov 26 17:06:49 crc kubenswrapper[5010]: I1126 17:06:49.201772 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9fe4-account-create-update-t4fxr"] Nov 26 17:06:49 crc kubenswrapper[5010]: I1126 17:06:49.202861 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7f87h" event={"ID":"70f6ece9-ed80-47e5-9da8-7d958b8da066","Type":"ContainerStarted","Data":"d82853a462300f3b8c070607dd3d03e19c7a8da82a17795e217170cafe6b14ee"} Nov 26 17:06:50 crc kubenswrapper[5010]: I1126 17:06:50.219820 5010 generic.go:334] "Generic (PLEG): container finished" podID="70f6ece9-ed80-47e5-9da8-7d958b8da066" containerID="671fe32738a86be9e90208842b66a407a2c2b627c2efcd25ede6f23bbeeb9a7c" exitCode=0 Nov 26 17:06:50 crc kubenswrapper[5010]: I1126 17:06:50.219907 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7f87h" event={"ID":"70f6ece9-ed80-47e5-9da8-7d958b8da066","Type":"ContainerDied","Data":"671fe32738a86be9e90208842b66a407a2c2b627c2efcd25ede6f23bbeeb9a7c"} Nov 26 17:06:50 crc kubenswrapper[5010]: I1126 17:06:50.223812 5010 generic.go:334] "Generic (PLEG): container finished" podID="37efa8b7-9526-4456-b8fb-3f637f7b03ba" containerID="fce637e741eb6549f09d92349a058b0ce245a0b06e4ba6e11805c6b8ed1b6a94" exitCode=0 Nov 26 17:06:50 crc kubenswrapper[5010]: I1126 17:06:50.223870 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9fe4-account-create-update-t4fxr" event={"ID":"37efa8b7-9526-4456-b8fb-3f637f7b03ba","Type":"ContainerDied","Data":"fce637e741eb6549f09d92349a058b0ce245a0b06e4ba6e11805c6b8ed1b6a94"} Nov 26 17:06:50 crc kubenswrapper[5010]: I1126 17:06:50.223905 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9fe4-account-create-update-t4fxr" event={"ID":"37efa8b7-9526-4456-b8fb-3f637f7b03ba","Type":"ContainerStarted","Data":"5d6a9eb63fe05d3e2d1996adc2d7cb405b03ecf83cb9a45da7293742656b094c"} Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.708890 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7f87h" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.717723 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.809398 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70f6ece9-ed80-47e5-9da8-7d958b8da066-operator-scripts\") pod \"70f6ece9-ed80-47e5-9da8-7d958b8da066\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.809509 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37efa8b7-9526-4456-b8fb-3f637f7b03ba-operator-scripts\") pod \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.809546 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-69pjk\" (UniqueName: \"kubernetes.io/projected/70f6ece9-ed80-47e5-9da8-7d958b8da066-kube-api-access-69pjk\") pod \"70f6ece9-ed80-47e5-9da8-7d958b8da066\" (UID: \"70f6ece9-ed80-47e5-9da8-7d958b8da066\") " Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.809729 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f975d\" (UniqueName: \"kubernetes.io/projected/37efa8b7-9526-4456-b8fb-3f637f7b03ba-kube-api-access-f975d\") pod \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\" (UID: \"37efa8b7-9526-4456-b8fb-3f637f7b03ba\") " Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.810146 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/70f6ece9-ed80-47e5-9da8-7d958b8da066-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "70f6ece9-ed80-47e5-9da8-7d958b8da066" (UID: "70f6ece9-ed80-47e5-9da8-7d958b8da066"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.810187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37efa8b7-9526-4456-b8fb-3f637f7b03ba-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "37efa8b7-9526-4456-b8fb-3f637f7b03ba" (UID: "37efa8b7-9526-4456-b8fb-3f637f7b03ba"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.817925 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37efa8b7-9526-4456-b8fb-3f637f7b03ba-kube-api-access-f975d" (OuterVolumeSpecName: "kube-api-access-f975d") pod "37efa8b7-9526-4456-b8fb-3f637f7b03ba" (UID: "37efa8b7-9526-4456-b8fb-3f637f7b03ba"). InnerVolumeSpecName "kube-api-access-f975d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.817996 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70f6ece9-ed80-47e5-9da8-7d958b8da066-kube-api-access-69pjk" (OuterVolumeSpecName: "kube-api-access-69pjk") pod "70f6ece9-ed80-47e5-9da8-7d958b8da066" (UID: "70f6ece9-ed80-47e5-9da8-7d958b8da066"). InnerVolumeSpecName "kube-api-access-69pjk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.911984 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/37efa8b7-9526-4456-b8fb-3f637f7b03ba-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.912032 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-69pjk\" (UniqueName: \"kubernetes.io/projected/70f6ece9-ed80-47e5-9da8-7d958b8da066-kube-api-access-69pjk\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.912052 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f975d\" (UniqueName: \"kubernetes.io/projected/37efa8b7-9526-4456-b8fb-3f637f7b03ba-kube-api-access-f975d\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:51 crc kubenswrapper[5010]: I1126 17:06:51.912070 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/70f6ece9-ed80-47e5-9da8-7d958b8da066-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:52 crc kubenswrapper[5010]: I1126 17:06:52.252240 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-7f87h" event={"ID":"70f6ece9-ed80-47e5-9da8-7d958b8da066","Type":"ContainerDied","Data":"d82853a462300f3b8c070607dd3d03e19c7a8da82a17795e217170cafe6b14ee"} Nov 26 17:06:52 crc kubenswrapper[5010]: I1126 17:06:52.252303 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d82853a462300f3b8c070607dd3d03e19c7a8da82a17795e217170cafe6b14ee" Nov 26 17:06:52 crc kubenswrapper[5010]: I1126 17:06:52.252625 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-7f87h" Nov 26 17:06:52 crc kubenswrapper[5010]: I1126 17:06:52.256867 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9fe4-account-create-update-t4fxr" event={"ID":"37efa8b7-9526-4456-b8fb-3f637f7b03ba","Type":"ContainerDied","Data":"5d6a9eb63fe05d3e2d1996adc2d7cb405b03ecf83cb9a45da7293742656b094c"} Nov 26 17:06:52 crc kubenswrapper[5010]: I1126 17:06:52.256948 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d6a9eb63fe05d3e2d1996adc2d7cb405b03ecf83cb9a45da7293742656b094c" Nov 26 17:06:52 crc kubenswrapper[5010]: I1126 17:06:52.256912 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9fe4-account-create-update-t4fxr" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.702348 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-866b588b8c-lgtkl"] Nov 26 17:06:53 crc kubenswrapper[5010]: E1126 17:06:53.703072 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37efa8b7-9526-4456-b8fb-3f637f7b03ba" containerName="mariadb-account-create-update" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.703086 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="37efa8b7-9526-4456-b8fb-3f637f7b03ba" containerName="mariadb-account-create-update" Nov 26 17:06:53 crc kubenswrapper[5010]: E1126 17:06:53.703098 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70f6ece9-ed80-47e5-9da8-7d958b8da066" containerName="mariadb-database-create" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.703104 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="70f6ece9-ed80-47e5-9da8-7d958b8da066" containerName="mariadb-database-create" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.703257 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="70f6ece9-ed80-47e5-9da8-7d958b8da066" containerName="mariadb-database-create" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.703281 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="37efa8b7-9526-4456-b8fb-3f637f7b03ba" containerName="mariadb-account-create-update" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.705026 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.740275 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866b588b8c-lgtkl"] Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.749845 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-config\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.749924 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-nb\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.749954 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-sb\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.750031 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tn8g\" (UniqueName: \"kubernetes.io/projected/58bd9152-e037-4d43-abf2-513e32b0eb0a-kube-api-access-5tn8g\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.750061 
5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-dns-svc\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.759974 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-j2gdw"] Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.761212 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.765280 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.765508 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gkj78" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.765649 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.788687 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-j2gdw"] Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.852759 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-nb\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.852817 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-config-data\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.852849 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-sb\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.852931 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52088ec4-ddbc-4524-83e8-ca6d029082fc-logs\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.852956 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-combined-ca-bundle\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.852982 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tn8g\" (UniqueName: \"kubernetes.io/projected/58bd9152-e037-4d43-abf2-513e32b0eb0a-kube-api-access-5tn8g\") 
pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.853010 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-scripts\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.853035 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-dns-svc\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.853068 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bt4lq\" (UniqueName: \"kubernetes.io/projected/52088ec4-ddbc-4524-83e8-ca6d029082fc-kube-api-access-bt4lq\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.853106 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-config\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.853675 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-nb\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.853913 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-config\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.854289 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-sb\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.854875 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-dns-svc\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.874772 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tn8g\" (UniqueName: \"kubernetes.io/projected/58bd9152-e037-4d43-abf2-513e32b0eb0a-kube-api-access-5tn8g\") pod \"dnsmasq-dns-866b588b8c-lgtkl\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " 
pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.954625 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52088ec4-ddbc-4524-83e8-ca6d029082fc-logs\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.955167 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-combined-ca-bundle\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.955205 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-scripts\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.955243 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bt4lq\" (UniqueName: \"kubernetes.io/projected/52088ec4-ddbc-4524-83e8-ca6d029082fc-kube-api-access-bt4lq\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.955309 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-config-data\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.957119 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52088ec4-ddbc-4524-83e8-ca6d029082fc-logs\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.965024 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-combined-ca-bundle\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.972692 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-config-data\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.974211 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-scripts\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:53 crc kubenswrapper[5010]: I1126 17:06:53.982955 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bt4lq\" (UniqueName: 
\"kubernetes.io/projected/52088ec4-ddbc-4524-83e8-ca6d029082fc-kube-api-access-bt4lq\") pod \"placement-db-sync-j2gdw\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:54 crc kubenswrapper[5010]: I1126 17:06:54.029943 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:54 crc kubenswrapper[5010]: I1126 17:06:54.091528 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:54 crc kubenswrapper[5010]: I1126 17:06:54.585128 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-866b588b8c-lgtkl"] Nov 26 17:06:54 crc kubenswrapper[5010]: I1126 17:06:54.688414 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-j2gdw"] Nov 26 17:06:54 crc kubenswrapper[5010]: W1126 17:06:54.712912 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod52088ec4_ddbc_4524_83e8_ca6d029082fc.slice/crio-3bd3e4043dd87f13127a15fc0d2c5e1a6a6780b1de180679dd531b11c1f4fade WatchSource:0}: Error finding container 3bd3e4043dd87f13127a15fc0d2c5e1a6a6780b1de180679dd531b11c1f4fade: Status 404 returned error can't find the container with id 3bd3e4043dd87f13127a15fc0d2c5e1a6a6780b1de180679dd531b11c1f4fade Nov 26 17:06:55 crc kubenswrapper[5010]: I1126 17:06:55.291699 5010 generic.go:334] "Generic (PLEG): container finished" podID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerID="70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8" exitCode=0 Nov 26 17:06:55 crc kubenswrapper[5010]: I1126 17:06:55.291844 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" event={"ID":"58bd9152-e037-4d43-abf2-513e32b0eb0a","Type":"ContainerDied","Data":"70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8"} Nov 26 17:06:55 crc kubenswrapper[5010]: I1126 17:06:55.292206 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" event={"ID":"58bd9152-e037-4d43-abf2-513e32b0eb0a","Type":"ContainerStarted","Data":"4bc4947b8234690adf33a38e5260edb6197aa29237e3e320046a644660a60e70"} Nov 26 17:06:55 crc kubenswrapper[5010]: I1126 17:06:55.296014 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j2gdw" event={"ID":"52088ec4-ddbc-4524-83e8-ca6d029082fc","Type":"ContainerStarted","Data":"0e53fa8c2e11c7da51c9ca09f6f0926bfd55f433e8922611449b5f51abb466c9"} Nov 26 17:06:55 crc kubenswrapper[5010]: I1126 17:06:55.296073 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j2gdw" event={"ID":"52088ec4-ddbc-4524-83e8-ca6d029082fc","Type":"ContainerStarted","Data":"3bd3e4043dd87f13127a15fc0d2c5e1a6a6780b1de180679dd531b11c1f4fade"} Nov 26 17:06:55 crc kubenswrapper[5010]: I1126 17:06:55.345172 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-j2gdw" podStartSLOduration=2.34515626 podStartE2EDuration="2.34515626s" podCreationTimestamp="2025-11-26 17:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:55.3431138 +0000 UTC m=+6036.133830978" watchObservedRunningTime="2025-11-26 17:06:55.34515626 +0000 UTC m=+6036.135873408" Nov 26 17:06:56 crc kubenswrapper[5010]: I1126 
17:06:56.310719 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" event={"ID":"58bd9152-e037-4d43-abf2-513e32b0eb0a","Type":"ContainerStarted","Data":"9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632"} Nov 26 17:06:56 crc kubenswrapper[5010]: I1126 17:06:56.310781 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:06:56 crc kubenswrapper[5010]: I1126 17:06:56.336413 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" podStartSLOduration=3.336390327 podStartE2EDuration="3.336390327s" podCreationTimestamp="2025-11-26 17:06:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:06:56.331501895 +0000 UTC m=+6037.122219043" watchObservedRunningTime="2025-11-26 17:06:56.336390327 +0000 UTC m=+6037.127107475" Nov 26 17:06:57 crc kubenswrapper[5010]: I1126 17:06:57.328692 5010 generic.go:334] "Generic (PLEG): container finished" podID="52088ec4-ddbc-4524-83e8-ca6d029082fc" containerID="0e53fa8c2e11c7da51c9ca09f6f0926bfd55f433e8922611449b5f51abb466c9" exitCode=0 Nov 26 17:06:57 crc kubenswrapper[5010]: I1126 17:06:57.328778 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j2gdw" event={"ID":"52088ec4-ddbc-4524-83e8-ca6d029082fc","Type":"ContainerDied","Data":"0e53fa8c2e11c7da51c9ca09f6f0926bfd55f433e8922611449b5f51abb466c9"} Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.783505 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.948878 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-scripts\") pod \"52088ec4-ddbc-4524-83e8-ca6d029082fc\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.949082 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-config-data\") pod \"52088ec4-ddbc-4524-83e8-ca6d029082fc\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.949189 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52088ec4-ddbc-4524-83e8-ca6d029082fc-logs\") pod \"52088ec4-ddbc-4524-83e8-ca6d029082fc\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.949218 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bt4lq\" (UniqueName: \"kubernetes.io/projected/52088ec4-ddbc-4524-83e8-ca6d029082fc-kube-api-access-bt4lq\") pod \"52088ec4-ddbc-4524-83e8-ca6d029082fc\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.949252 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-combined-ca-bundle\") pod \"52088ec4-ddbc-4524-83e8-ca6d029082fc\" (UID: \"52088ec4-ddbc-4524-83e8-ca6d029082fc\") " Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 
17:06:58.950488 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52088ec4-ddbc-4524-83e8-ca6d029082fc-logs" (OuterVolumeSpecName: "logs") pod "52088ec4-ddbc-4524-83e8-ca6d029082fc" (UID: "52088ec4-ddbc-4524-83e8-ca6d029082fc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.956807 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-scripts" (OuterVolumeSpecName: "scripts") pod "52088ec4-ddbc-4524-83e8-ca6d029082fc" (UID: "52088ec4-ddbc-4524-83e8-ca6d029082fc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.961094 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52088ec4-ddbc-4524-83e8-ca6d029082fc-kube-api-access-bt4lq" (OuterVolumeSpecName: "kube-api-access-bt4lq") pod "52088ec4-ddbc-4524-83e8-ca6d029082fc" (UID: "52088ec4-ddbc-4524-83e8-ca6d029082fc"). InnerVolumeSpecName "kube-api-access-bt4lq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.981244 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "52088ec4-ddbc-4524-83e8-ca6d029082fc" (UID: "52088ec4-ddbc-4524-83e8-ca6d029082fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:58 crc kubenswrapper[5010]: I1126 17:06:58.990490 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-config-data" (OuterVolumeSpecName: "config-data") pod "52088ec4-ddbc-4524-83e8-ca6d029082fc" (UID: "52088ec4-ddbc-4524-83e8-ca6d029082fc"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.051693 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.051749 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/52088ec4-ddbc-4524-83e8-ca6d029082fc-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.051763 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bt4lq\" (UniqueName: \"kubernetes.io/projected/52088ec4-ddbc-4524-83e8-ca6d029082fc-kube-api-access-bt4lq\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.051778 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.051791 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/52088ec4-ddbc-4524-83e8-ca6d029082fc-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.357515 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-j2gdw" event={"ID":"52088ec4-ddbc-4524-83e8-ca6d029082fc","Type":"ContainerDied","Data":"3bd3e4043dd87f13127a15fc0d2c5e1a6a6780b1de180679dd531b11c1f4fade"} Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.357576 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bd3e4043dd87f13127a15fc0d2c5e1a6a6780b1de180679dd531b11c1f4fade" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.357602 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-j2gdw" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.890156 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-f6db4d686-lqclr"] Nov 26 17:06:59 crc kubenswrapper[5010]: E1126 17:06:59.891103 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52088ec4-ddbc-4524-83e8-ca6d029082fc" containerName="placement-db-sync" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.891129 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="52088ec4-ddbc-4524-83e8-ca6d029082fc" containerName="placement-db-sync" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.891442 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="52088ec4-ddbc-4524-83e8-ca6d029082fc" containerName="placement-db-sync" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.895526 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.902526 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.902801 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.903032 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.903175 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.903193 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-gkj78" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.914582 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f6db4d686-lqclr"] Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.975973 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-config-data\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.976060 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-scripts\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.976108 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcbq7\" (UniqueName: \"kubernetes.io/projected/5412afb1-3aa4-4a56-8078-23e8c783f3ea-kube-api-access-bcbq7\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.976134 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-internal-tls-certs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.976182 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-public-tls-certs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.976223 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-combined-ca-bundle\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:06:59 crc kubenswrapper[5010]: I1126 17:06:59.976246 
5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5412afb1-3aa4-4a56-8078-23e8c783f3ea-logs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078167 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcbq7\" (UniqueName: \"kubernetes.io/projected/5412afb1-3aa4-4a56-8078-23e8c783f3ea-kube-api-access-bcbq7\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078219 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-internal-tls-certs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078253 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-public-tls-certs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078314 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-combined-ca-bundle\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078337 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5412afb1-3aa4-4a56-8078-23e8c783f3ea-logs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078386 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-config-data\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.078440 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-scripts\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.079165 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5412afb1-3aa4-4a56-8078-23e8c783f3ea-logs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.082183 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-internal-tls-certs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.082992 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-combined-ca-bundle\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.084283 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-config-data\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.085976 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-scripts\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.086040 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5412afb1-3aa4-4a56-8078-23e8c783f3ea-public-tls-certs\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.096918 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcbq7\" (UniqueName: \"kubernetes.io/projected/5412afb1-3aa4-4a56-8078-23e8c783f3ea-kube-api-access-bcbq7\") pod \"placement-f6db4d686-lqclr\" (UID: \"5412afb1-3aa4-4a56-8078-23e8c783f3ea\") " pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.232914 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:00 crc kubenswrapper[5010]: I1126 17:07:00.654959 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f6db4d686-lqclr"] Nov 26 17:07:01 crc kubenswrapper[5010]: I1126 17:07:01.385377 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f6db4d686-lqclr" event={"ID":"5412afb1-3aa4-4a56-8078-23e8c783f3ea","Type":"ContainerStarted","Data":"e03c22bebc07a23c83755fdab1118346cc835f80cfb3ef5bdd2e1c62cb31b7ab"} Nov 26 17:07:01 crc kubenswrapper[5010]: I1126 17:07:01.385644 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:01 crc kubenswrapper[5010]: I1126 17:07:01.385655 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:01 crc kubenswrapper[5010]: I1126 17:07:01.385664 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f6db4d686-lqclr" event={"ID":"5412afb1-3aa4-4a56-8078-23e8c783f3ea","Type":"ContainerStarted","Data":"f27675f5cb9fee637d9eb7eccc0e13e716b106e2bd6f8f0383bcf9a4a12c6d08"} Nov 26 17:07:01 crc kubenswrapper[5010]: I1126 17:07:01.385673 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f6db4d686-lqclr" event={"ID":"5412afb1-3aa4-4a56-8078-23e8c783f3ea","Type":"ContainerStarted","Data":"f464e079201d7c35b7e933673948d24bc73e95f6895d27088ec90ba8be2703bf"} Nov 26 17:07:01 crc kubenswrapper[5010]: I1126 17:07:01.417663 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-f6db4d686-lqclr" podStartSLOduration=2.417643128 podStartE2EDuration="2.417643128s" podCreationTimestamp="2025-11-26 17:06:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:07:01.4132797 +0000 UTC m=+6042.203996858" watchObservedRunningTime="2025-11-26 17:07:01.417643128 +0000 UTC m=+6042.208360286" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.032136 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.123471 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6988c6b67f-4dbcn"] Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.123823 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerName="dnsmasq-dns" containerID="cri-o://3c98591f5a402d9130156bff6033377e77cf83c2146f490e3d4223841bdf3cfc" gracePeriod=10 Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.427946 5010 generic.go:334] "Generic (PLEG): container finished" podID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerID="3c98591f5a402d9130156bff6033377e77cf83c2146f490e3d4223841bdf3cfc" exitCode=0 Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.428020 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" event={"ID":"31a50f49-b8ee-4511-8c6b-f31245536f56","Type":"ContainerDied","Data":"3c98591f5a402d9130156bff6033377e77cf83c2146f490e3d4223841bdf3cfc"} Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.636666 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.774778 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-nb\") pod \"31a50f49-b8ee-4511-8c6b-f31245536f56\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.775279 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-sb\") pod \"31a50f49-b8ee-4511-8c6b-f31245536f56\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.775815 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-dns-svc\") pod \"31a50f49-b8ee-4511-8c6b-f31245536f56\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.775892 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w89m2\" (UniqueName: \"kubernetes.io/projected/31a50f49-b8ee-4511-8c6b-f31245536f56-kube-api-access-w89m2\") pod \"31a50f49-b8ee-4511-8c6b-f31245536f56\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.775920 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-config\") pod \"31a50f49-b8ee-4511-8c6b-f31245536f56\" (UID: \"31a50f49-b8ee-4511-8c6b-f31245536f56\") " Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.784264 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31a50f49-b8ee-4511-8c6b-f31245536f56-kube-api-access-w89m2" (OuterVolumeSpecName: "kube-api-access-w89m2") pod "31a50f49-b8ee-4511-8c6b-f31245536f56" (UID: "31a50f49-b8ee-4511-8c6b-f31245536f56"). InnerVolumeSpecName "kube-api-access-w89m2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.845837 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "31a50f49-b8ee-4511-8c6b-f31245536f56" (UID: "31a50f49-b8ee-4511-8c6b-f31245536f56"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.860527 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "31a50f49-b8ee-4511-8c6b-f31245536f56" (UID: "31a50f49-b8ee-4511-8c6b-f31245536f56"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.863300 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "31a50f49-b8ee-4511-8c6b-f31245536f56" (UID: "31a50f49-b8ee-4511-8c6b-f31245536f56"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.870027 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-config" (OuterVolumeSpecName: "config") pod "31a50f49-b8ee-4511-8c6b-f31245536f56" (UID: "31a50f49-b8ee-4511-8c6b-f31245536f56"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.879475 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.879513 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.879527 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.879537 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/31a50f49-b8ee-4511-8c6b-f31245536f56-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:04 crc kubenswrapper[5010]: I1126 17:07:04.879551 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w89m2\" (UniqueName: \"kubernetes.io/projected/31a50f49-b8ee-4511-8c6b-f31245536f56-kube-api-access-w89m2\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.437425 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" event={"ID":"31a50f49-b8ee-4511-8c6b-f31245536f56","Type":"ContainerDied","Data":"bbaee86b2326b8469f0bf7cc7cf3d0af688167a448655539e4574c1c92ee0160"} Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.437482 5010 scope.go:117] "RemoveContainer" containerID="3c98591f5a402d9130156bff6033377e77cf83c2146f490e3d4223841bdf3cfc" Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.437481 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6988c6b67f-4dbcn" Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.461076 5010 scope.go:117] "RemoveContainer" containerID="6d02c1b8b8b6d59d6382bdbb555dd8231a100cceca36c48cc3a28db1864c3393" Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.476970 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6988c6b67f-4dbcn"] Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.491077 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6988c6b67f-4dbcn"] Nov 26 17:07:05 crc kubenswrapper[5010]: I1126 17:07:05.908505 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" path="/var/lib/kubelet/pods/31a50f49-b8ee-4511-8c6b-f31245536f56/volumes" Nov 26 17:07:31 crc kubenswrapper[5010]: I1126 17:07:31.289842 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:31 crc kubenswrapper[5010]: I1126 17:07:31.341252 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-f6db4d686-lqclr" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.365698 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sg52c"] Nov 26 17:07:51 crc kubenswrapper[5010]: E1126 17:07:51.367376 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerName="dnsmasq-dns" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.367418 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerName="dnsmasq-dns" Nov 26 17:07:51 crc kubenswrapper[5010]: E1126 17:07:51.367454 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerName="init" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.367474 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerName="init" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.367973 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="31a50f49-b8ee-4511-8c6b-f31245536f56" containerName="dnsmasq-dns" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.371326 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.381587 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sg52c"] Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.550821 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-utilities\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.550937 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-catalog-content\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.551016 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98v26\" (UniqueName: \"kubernetes.io/projected/ed597718-41fc-4a31-98a6-e5e023a968ef-kube-api-access-98v26\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.652870 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-catalog-content\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.652957 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98v26\" (UniqueName: \"kubernetes.io/projected/ed597718-41fc-4a31-98a6-e5e023a968ef-kube-api-access-98v26\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.653038 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-utilities\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.653456 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-catalog-content\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.653477 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-utilities\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.675808 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-98v26\" (UniqueName: \"kubernetes.io/projected/ed597718-41fc-4a31-98a6-e5e023a968ef-kube-api-access-98v26\") pod \"certified-operators-sg52c\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:51 crc kubenswrapper[5010]: I1126 17:07:51.707992 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:07:52 crc kubenswrapper[5010]: I1126 17:07:52.286053 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sg52c"] Nov 26 17:07:52 crc kubenswrapper[5010]: I1126 17:07:52.974214 5010 generic.go:334] "Generic (PLEG): container finished" podID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerID="feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299" exitCode=0 Nov 26 17:07:52 crc kubenswrapper[5010]: I1126 17:07:52.974432 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerDied","Data":"feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299"} Nov 26 17:07:52 crc kubenswrapper[5010]: I1126 17:07:52.974534 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerStarted","Data":"b1b9deba9b0594ab4693eae0ce3bd1b40bbedb9ba6b677aaf414cbc65740caa4"} Nov 26 17:07:53 crc kubenswrapper[5010]: I1126 17:07:53.985434 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerStarted","Data":"20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24"} Nov 26 17:07:54 crc kubenswrapper[5010]: I1126 17:07:54.998172 5010 generic.go:334] "Generic (PLEG): container finished" podID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerID="20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24" exitCode=0 Nov 26 17:07:54 crc kubenswrapper[5010]: I1126 17:07:54.998264 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerDied","Data":"20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24"} Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.619901 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-8l9b6"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.621418 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.628394 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8l9b6"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.710599 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-kz6ff"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.711848 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.725071 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-kz6ff"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.727910 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1370ec66-0402-4b67-acf3-ddeb0c734107-operator-scripts\") pod \"nova-api-db-create-8l9b6\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.727983 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w78k5\" (UniqueName: \"kubernetes.io/projected/1370ec66-0402-4b67-acf3-ddeb0c734107-kube-api-access-w78k5\") pod \"nova-api-db-create-8l9b6\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.814275 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-d145-account-create-update-xxj6c"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.816242 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.817697 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.824905 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-d145-account-create-update-xxj6c"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.830875 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82d71aae-da28-4c09-9c6e-f665982ea911-operator-scripts\") pod \"nova-cell0-db-create-kz6ff\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.830991 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxkl6\" (UniqueName: \"kubernetes.io/projected/82d71aae-da28-4c09-9c6e-f665982ea911-kube-api-access-xxkl6\") pod \"nova-cell0-db-create-kz6ff\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.831021 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1370ec66-0402-4b67-acf3-ddeb0c734107-operator-scripts\") pod \"nova-api-db-create-8l9b6\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.831077 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w78k5\" (UniqueName: \"kubernetes.io/projected/1370ec66-0402-4b67-acf3-ddeb0c734107-kube-api-access-w78k5\") pod \"nova-api-db-create-8l9b6\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.831845 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/1370ec66-0402-4b67-acf3-ddeb0c734107-operator-scripts\") pod \"nova-api-db-create-8l9b6\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.860685 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w78k5\" (UniqueName: \"kubernetes.io/projected/1370ec66-0402-4b67-acf3-ddeb0c734107-kube-api-access-w78k5\") pod \"nova-api-db-create-8l9b6\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.933086 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-vqcbv"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.936591 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8161da-57ca-4a25-928d-ec41f12b6916-operator-scripts\") pod \"nova-api-d145-account-create-update-xxj6c\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.936752 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4xm9\" (UniqueName: \"kubernetes.io/projected/bf8161da-57ca-4a25-928d-ec41f12b6916-kube-api-access-v4xm9\") pod \"nova-api-d145-account-create-update-xxj6c\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.936790 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.937196 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxkl6\" (UniqueName: \"kubernetes.io/projected/82d71aae-da28-4c09-9c6e-f665982ea911-kube-api-access-xxkl6\") pod \"nova-cell0-db-create-kz6ff\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.937834 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82d71aae-da28-4c09-9c6e-f665982ea911-operator-scripts\") pod \"nova-cell0-db-create-kz6ff\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.938607 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82d71aae-da28-4c09-9c6e-f665982ea911-operator-scripts\") pod \"nova-cell0-db-create-kz6ff\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.952046 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-vqcbv"] Nov 26 17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.955544 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxkl6\" (UniqueName: \"kubernetes.io/projected/82d71aae-da28-4c09-9c6e-f665982ea911-kube-api-access-xxkl6\") pod \"nova-cell0-db-create-kz6ff\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 
17:07:55 crc kubenswrapper[5010]: I1126 17:07:55.968326 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.023056 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerStarted","Data":"a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee"} Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.028347 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-c5dd-account-create-update-n6nql"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.029540 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.031107 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.035435 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.042318 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8hz8z\" (UniqueName: \"kubernetes.io/projected/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-kube-api-access-8hz8z\") pod \"nova-cell1-db-create-vqcbv\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.042355 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8161da-57ca-4a25-928d-ec41f12b6916-operator-scripts\") pod \"nova-api-d145-account-create-update-xxj6c\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.042390 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4xm9\" (UniqueName: \"kubernetes.io/projected/bf8161da-57ca-4a25-928d-ec41f12b6916-kube-api-access-v4xm9\") pod \"nova-api-d145-account-create-update-xxj6c\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.042509 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-operator-scripts\") pod \"nova-cell1-db-create-vqcbv\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.043531 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8161da-57ca-4a25-928d-ec41f12b6916-operator-scripts\") pod \"nova-api-d145-account-create-update-xxj6c\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.053698 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c5dd-account-create-update-n6nql"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.059680 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sg52c" podStartSLOduration=2.630740443 podStartE2EDuration="5.059653516s" podCreationTimestamp="2025-11-26 17:07:51 +0000 UTC" firstStartedPulling="2025-11-26 17:07:52.9766839 +0000 UTC m=+6093.767401058" lastFinishedPulling="2025-11-26 17:07:55.405596973 +0000 UTC m=+6096.196314131" observedRunningTime="2025-11-26 17:07:56.041128235 +0000 UTC m=+6096.831845393" watchObservedRunningTime="2025-11-26 17:07:56.059653516 +0000 UTC m=+6096.850370664" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.068778 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4xm9\" (UniqueName: \"kubernetes.io/projected/bf8161da-57ca-4a25-928d-ec41f12b6916-kube-api-access-v4xm9\") pod \"nova-api-d145-account-create-update-xxj6c\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.142274 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.143733 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56a19aa9-1344-4742-9705-0dc8191f47a5-operator-scripts\") pod \"nova-cell0-c5dd-account-create-update-n6nql\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.143769 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8hz8z\" (UniqueName: \"kubernetes.io/projected/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-kube-api-access-8hz8z\") pod \"nova-cell1-db-create-vqcbv\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.143843 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-operator-scripts\") pod \"nova-cell1-db-create-vqcbv\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.143882 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs879\" (UniqueName: \"kubernetes.io/projected/56a19aa9-1344-4742-9705-0dc8191f47a5-kube-api-access-bs879\") pod \"nova-cell0-c5dd-account-create-update-n6nql\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.144683 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-operator-scripts\") pod \"nova-cell1-db-create-vqcbv\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.163751 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8hz8z\" (UniqueName: \"kubernetes.io/projected/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-kube-api-access-8hz8z\") pod \"nova-cell1-db-create-vqcbv\" (UID: 
\"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.229524 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-be3a-account-create-update-lpt74"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.230835 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.233029 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.247081 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56a19aa9-1344-4742-9705-0dc8191f47a5-operator-scripts\") pod \"nova-cell0-c5dd-account-create-update-n6nql\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.247197 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs879\" (UniqueName: \"kubernetes.io/projected/56a19aa9-1344-4742-9705-0dc8191f47a5-kube-api-access-bs879\") pod \"nova-cell0-c5dd-account-create-update-n6nql\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.248219 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56a19aa9-1344-4742-9705-0dc8191f47a5-operator-scripts\") pod \"nova-cell0-c5dd-account-create-update-n6nql\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.257520 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-be3a-account-create-update-lpt74"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.268056 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs879\" (UniqueName: \"kubernetes.io/projected/56a19aa9-1344-4742-9705-0dc8191f47a5-kube-api-access-bs879\") pod \"nova-cell0-c5dd-account-create-update-n6nql\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.294534 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.349815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frvwt\" (UniqueName: \"kubernetes.io/projected/47d38398-693a-429e-9941-aff2dd54a904-kube-api-access-frvwt\") pod \"nova-cell1-be3a-account-create-update-lpt74\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.350249 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47d38398-693a-429e-9941-aff2dd54a904-operator-scripts\") pod \"nova-cell1-be3a-account-create-update-lpt74\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.425128 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.459117 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frvwt\" (UniqueName: \"kubernetes.io/projected/47d38398-693a-429e-9941-aff2dd54a904-kube-api-access-frvwt\") pod \"nova-cell1-be3a-account-create-update-lpt74\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.459515 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47d38398-693a-429e-9941-aff2dd54a904-operator-scripts\") pod \"nova-cell1-be3a-account-create-update-lpt74\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.461412 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47d38398-693a-429e-9941-aff2dd54a904-operator-scripts\") pod \"nova-cell1-be3a-account-create-update-lpt74\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.475317 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frvwt\" (UniqueName: \"kubernetes.io/projected/47d38398-693a-429e-9941-aff2dd54a904-kube-api-access-frvwt\") pod \"nova-cell1-be3a-account-create-update-lpt74\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.567396 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.571076 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-kz6ff"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.583210 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8l9b6"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.732803 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-d145-account-create-update-xxj6c"] Nov 26 17:07:56 crc kubenswrapper[5010]: I1126 17:07:56.891452 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-vqcbv"] Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.014566 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c5dd-account-create-update-n6nql"] Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.033063 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-vqcbv" event={"ID":"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6","Type":"ContainerStarted","Data":"eb32f82e7fef719245faa306a50387624a080636a0dd8ef3ff3b31131f7432ed"} Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.034780 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8l9b6" event={"ID":"1370ec66-0402-4b67-acf3-ddeb0c734107","Type":"ContainerStarted","Data":"486527e82ef4b776b7803b7ea3a3766ba058906d525b1db40855e3334e376101"} Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.036142 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" event={"ID":"56a19aa9-1344-4742-9705-0dc8191f47a5","Type":"ContainerStarted","Data":"1f91c9c99d21172f5206213d5a2ad29f075063bf069d87fcf5337d8224ffc47c"} Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.037814 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d145-account-create-update-xxj6c" event={"ID":"bf8161da-57ca-4a25-928d-ec41f12b6916","Type":"ContainerStarted","Data":"7095c8ad4b8d290ad3c7a2604615bf99eee8d44f11398816d9653b645f1bde2c"} Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.038985 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kz6ff" event={"ID":"82d71aae-da28-4c09-9c6e-f665982ea911","Type":"ContainerStarted","Data":"1c1472488abe01fe9e5f672eabd2e937041a8026c3f31f57464870eda92d32ce"} Nov 26 17:07:57 crc kubenswrapper[5010]: W1126 17:07:57.158257 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47d38398_693a_429e_9941_aff2dd54a904.slice/crio-2b91b728e6abec054348b8eabc25fde2c17ff4f6264a82676a69e778429e3d24 WatchSource:0}: Error finding container 2b91b728e6abec054348b8eabc25fde2c17ff4f6264a82676a69e778429e3d24: Status 404 returned error can't find the container with id 2b91b728e6abec054348b8eabc25fde2c17ff4f6264a82676a69e778429e3d24 Nov 26 17:07:57 crc kubenswrapper[5010]: I1126 17:07:57.161695 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-be3a-account-create-update-lpt74"] Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.054250 5010 generic.go:334] "Generic (PLEG): container finished" podID="82d71aae-da28-4c09-9c6e-f665982ea911" containerID="ec47517e8cd43382dbe26789f6aa5ce2cf3f5ccf9508cee8822058294d49f8fa" exitCode=0 Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 
17:07:58.054304 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kz6ff" event={"ID":"82d71aae-da28-4c09-9c6e-f665982ea911","Type":"ContainerDied","Data":"ec47517e8cd43382dbe26789f6aa5ce2cf3f5ccf9508cee8822058294d49f8fa"} Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.058060 5010 generic.go:334] "Generic (PLEG): container finished" podID="c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" containerID="acaa88689664d9ad22644fadb3acfeb8b28c2521b691fc660611dafc348326a9" exitCode=0 Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.058157 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-vqcbv" event={"ID":"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6","Type":"ContainerDied","Data":"acaa88689664d9ad22644fadb3acfeb8b28c2521b691fc660611dafc348326a9"} Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.062698 5010 generic.go:334] "Generic (PLEG): container finished" podID="1370ec66-0402-4b67-acf3-ddeb0c734107" containerID="82443df6ef8f7f2a9ad789d44aafa8a0a0cc09b0b64c0b29d03d07129a5fa2e1" exitCode=0 Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.062811 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8l9b6" event={"ID":"1370ec66-0402-4b67-acf3-ddeb0c734107","Type":"ContainerDied","Data":"82443df6ef8f7f2a9ad789d44aafa8a0a0cc09b0b64c0b29d03d07129a5fa2e1"} Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.065027 5010 generic.go:334] "Generic (PLEG): container finished" podID="56a19aa9-1344-4742-9705-0dc8191f47a5" containerID="7d6fefc8f5fe836bcecfccd6737617e015d761e65db290fc3d3b476d48bb961d" exitCode=0 Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.065149 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" event={"ID":"56a19aa9-1344-4742-9705-0dc8191f47a5","Type":"ContainerDied","Data":"7d6fefc8f5fe836bcecfccd6737617e015d761e65db290fc3d3b476d48bb961d"} Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.073541 5010 generic.go:334] "Generic (PLEG): container finished" podID="47d38398-693a-429e-9941-aff2dd54a904" containerID="ab1efd807dac43def7c48c3441d4a6b75a2322e163e1ab616526468f5b7bf3e1" exitCode=0 Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.073638 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" event={"ID":"47d38398-693a-429e-9941-aff2dd54a904","Type":"ContainerDied","Data":"ab1efd807dac43def7c48c3441d4a6b75a2322e163e1ab616526468f5b7bf3e1"} Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.073669 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" event={"ID":"47d38398-693a-429e-9941-aff2dd54a904","Type":"ContainerStarted","Data":"2b91b728e6abec054348b8eabc25fde2c17ff4f6264a82676a69e778429e3d24"} Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.077044 5010 generic.go:334] "Generic (PLEG): container finished" podID="bf8161da-57ca-4a25-928d-ec41f12b6916" containerID="1a44697e4959368bad2f6f6bacdf084e1515de8b4a8a1aa5d304515f7e152d20" exitCode=0 Nov 26 17:07:58 crc kubenswrapper[5010]: I1126 17:07:58.077094 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d145-account-create-update-xxj6c" event={"ID":"bf8161da-57ca-4a25-928d-ec41f12b6916","Type":"ContainerDied","Data":"1a44697e4959368bad2f6f6bacdf084e1515de8b4a8a1aa5d304515f7e152d20"} Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.451569 5010 util.go:48] "No 
ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.632467 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.640362 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxkl6\" (UniqueName: \"kubernetes.io/projected/82d71aae-da28-4c09-9c6e-f665982ea911-kube-api-access-xxkl6\") pod \"82d71aae-da28-4c09-9c6e-f665982ea911\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.640433 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82d71aae-da28-4c09-9c6e-f665982ea911-operator-scripts\") pod \"82d71aae-da28-4c09-9c6e-f665982ea911\" (UID: \"82d71aae-da28-4c09-9c6e-f665982ea911\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.641349 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.641605 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82d71aae-da28-4c09-9c6e-f665982ea911-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "82d71aae-da28-4c09-9c6e-f665982ea911" (UID: "82d71aae-da28-4c09-9c6e-f665982ea911"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.651196 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.659803 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82d71aae-da28-4c09-9c6e-f665982ea911-kube-api-access-xxkl6" (OuterVolumeSpecName: "kube-api-access-xxkl6") pod "82d71aae-da28-4c09-9c6e-f665982ea911" (UID: "82d71aae-da28-4c09-9c6e-f665982ea911"). InnerVolumeSpecName "kube-api-access-xxkl6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.673065 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.678518 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.741976 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w78k5\" (UniqueName: \"kubernetes.io/projected/1370ec66-0402-4b67-acf3-ddeb0c734107-kube-api-access-w78k5\") pod \"1370ec66-0402-4b67-acf3-ddeb0c734107\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.742167 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1370ec66-0402-4b67-acf3-ddeb0c734107-operator-scripts\") pod \"1370ec66-0402-4b67-acf3-ddeb0c734107\" (UID: \"1370ec66-0402-4b67-acf3-ddeb0c734107\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.742264 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56a19aa9-1344-4742-9705-0dc8191f47a5-operator-scripts\") pod \"56a19aa9-1344-4742-9705-0dc8191f47a5\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.742291 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bs879\" (UniqueName: \"kubernetes.io/projected/56a19aa9-1344-4742-9705-0dc8191f47a5-kube-api-access-bs879\") pod \"56a19aa9-1344-4742-9705-0dc8191f47a5\" (UID: \"56a19aa9-1344-4742-9705-0dc8191f47a5\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.742642 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxkl6\" (UniqueName: \"kubernetes.io/projected/82d71aae-da28-4c09-9c6e-f665982ea911-kube-api-access-xxkl6\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.742659 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82d71aae-da28-4c09-9c6e-f665982ea911-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.743588 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/56a19aa9-1344-4742-9705-0dc8191f47a5-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "56a19aa9-1344-4742-9705-0dc8191f47a5" (UID: "56a19aa9-1344-4742-9705-0dc8191f47a5"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.743813 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1370ec66-0402-4b67-acf3-ddeb0c734107-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1370ec66-0402-4b67-acf3-ddeb0c734107" (UID: "1370ec66-0402-4b67-acf3-ddeb0c734107"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.745936 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56a19aa9-1344-4742-9705-0dc8191f47a5-kube-api-access-bs879" (OuterVolumeSpecName: "kube-api-access-bs879") pod "56a19aa9-1344-4742-9705-0dc8191f47a5" (UID: "56a19aa9-1344-4742-9705-0dc8191f47a5"). InnerVolumeSpecName "kube-api-access-bs879". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.746450 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1370ec66-0402-4b67-acf3-ddeb0c734107-kube-api-access-w78k5" (OuterVolumeSpecName: "kube-api-access-w78k5") pod "1370ec66-0402-4b67-acf3-ddeb0c734107" (UID: "1370ec66-0402-4b67-acf3-ddeb0c734107"). InnerVolumeSpecName "kube-api-access-w78k5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843285 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8161da-57ca-4a25-928d-ec41f12b6916-operator-scripts\") pod \"bf8161da-57ca-4a25-928d-ec41f12b6916\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843362 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-operator-scripts\") pod \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843565 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4xm9\" (UniqueName: \"kubernetes.io/projected/bf8161da-57ca-4a25-928d-ec41f12b6916-kube-api-access-v4xm9\") pod \"bf8161da-57ca-4a25-928d-ec41f12b6916\" (UID: \"bf8161da-57ca-4a25-928d-ec41f12b6916\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843634 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-frvwt\" (UniqueName: \"kubernetes.io/projected/47d38398-693a-429e-9941-aff2dd54a904-kube-api-access-frvwt\") pod \"47d38398-693a-429e-9941-aff2dd54a904\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843671 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8hz8z\" (UniqueName: \"kubernetes.io/projected/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-kube-api-access-8hz8z\") pod \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\" (UID: \"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843687 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47d38398-693a-429e-9941-aff2dd54a904-operator-scripts\") pod \"47d38398-693a-429e-9941-aff2dd54a904\" (UID: \"47d38398-693a-429e-9941-aff2dd54a904\") " Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843810 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf8161da-57ca-4a25-928d-ec41f12b6916-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bf8161da-57ca-4a25-928d-ec41f12b6916" (UID: "bf8161da-57ca-4a25-928d-ec41f12b6916"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.843858 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" (UID: "c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844202 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/56a19aa9-1344-4742-9705-0dc8191f47a5-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844214 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/47d38398-693a-429e-9941-aff2dd54a904-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "47d38398-693a-429e-9941-aff2dd54a904" (UID: "47d38398-693a-429e-9941-aff2dd54a904"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844223 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bs879\" (UniqueName: \"kubernetes.io/projected/56a19aa9-1344-4742-9705-0dc8191f47a5-kube-api-access-bs879\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844234 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w78k5\" (UniqueName: \"kubernetes.io/projected/1370ec66-0402-4b67-acf3-ddeb0c734107-kube-api-access-w78k5\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844244 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bf8161da-57ca-4a25-928d-ec41f12b6916-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844253 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.844262 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1370ec66-0402-4b67-acf3-ddeb0c734107-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.847072 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf8161da-57ca-4a25-928d-ec41f12b6916-kube-api-access-v4xm9" (OuterVolumeSpecName: "kube-api-access-v4xm9") pod "bf8161da-57ca-4a25-928d-ec41f12b6916" (UID: "bf8161da-57ca-4a25-928d-ec41f12b6916"). InnerVolumeSpecName "kube-api-access-v4xm9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.847190 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-kube-api-access-8hz8z" (OuterVolumeSpecName: "kube-api-access-8hz8z") pod "c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" (UID: "c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6"). InnerVolumeSpecName "kube-api-access-8hz8z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.848974 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47d38398-693a-429e-9941-aff2dd54a904-kube-api-access-frvwt" (OuterVolumeSpecName: "kube-api-access-frvwt") pod "47d38398-693a-429e-9941-aff2dd54a904" (UID: "47d38398-693a-429e-9941-aff2dd54a904"). InnerVolumeSpecName "kube-api-access-frvwt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.947536 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4xm9\" (UniqueName: \"kubernetes.io/projected/bf8161da-57ca-4a25-928d-ec41f12b6916-kube-api-access-v4xm9\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.947579 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-frvwt\" (UniqueName: \"kubernetes.io/projected/47d38398-693a-429e-9941-aff2dd54a904-kube-api-access-frvwt\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.947593 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8hz8z\" (UniqueName: \"kubernetes.io/projected/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6-kube-api-access-8hz8z\") on node \"crc\" DevicePath \"\"" Nov 26 17:07:59 crc kubenswrapper[5010]: I1126 17:07:59.947605 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/47d38398-693a-429e-9941-aff2dd54a904-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.107004 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-d145-account-create-update-xxj6c" event={"ID":"bf8161da-57ca-4a25-928d-ec41f12b6916","Type":"ContainerDied","Data":"7095c8ad4b8d290ad3c7a2604615bf99eee8d44f11398816d9653b645f1bde2c"} Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.107458 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7095c8ad4b8d290ad3c7a2604615bf99eee8d44f11398816d9653b645f1bde2c" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.107071 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-d145-account-create-update-xxj6c" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.109828 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-kz6ff" event={"ID":"82d71aae-da28-4c09-9c6e-f665982ea911","Type":"ContainerDied","Data":"1c1472488abe01fe9e5f672eabd2e937041a8026c3f31f57464870eda92d32ce"} Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.109899 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c1472488abe01fe9e5f672eabd2e937041a8026c3f31f57464870eda92d32ce" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.109848 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-kz6ff" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.112392 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-vqcbv" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.112855 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-vqcbv" event={"ID":"c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6","Type":"ContainerDied","Data":"eb32f82e7fef719245faa306a50387624a080636a0dd8ef3ff3b31131f7432ed"} Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.112903 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb32f82e7fef719245faa306a50387624a080636a0dd8ef3ff3b31131f7432ed" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.115031 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8l9b6" event={"ID":"1370ec66-0402-4b67-acf3-ddeb0c734107","Type":"ContainerDied","Data":"486527e82ef4b776b7803b7ea3a3766ba058906d525b1db40855e3334e376101"} Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.115227 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="486527e82ef4b776b7803b7ea3a3766ba058906d525b1db40855e3334e376101" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.115107 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8l9b6" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.117801 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" event={"ID":"56a19aa9-1344-4742-9705-0dc8191f47a5","Type":"ContainerDied","Data":"1f91c9c99d21172f5206213d5a2ad29f075063bf069d87fcf5337d8224ffc47c"} Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.117863 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1f91c9c99d21172f5206213d5a2ad29f075063bf069d87fcf5337d8224ffc47c" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.117953 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c5dd-account-create-update-n6nql" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.120856 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" event={"ID":"47d38398-693a-429e-9941-aff2dd54a904","Type":"ContainerDied","Data":"2b91b728e6abec054348b8eabc25fde2c17ff4f6264a82676a69e778429e3d24"} Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.120893 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b91b728e6abec054348b8eabc25fde2c17ff4f6264a82676a69e778429e3d24" Nov 26 17:08:00 crc kubenswrapper[5010]: I1126 17:08:00.120945 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-be3a-account-create-update-lpt74" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.281830 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-r8hq8"] Nov 26 17:08:01 crc kubenswrapper[5010]: E1126 17:08:01.282548 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82d71aae-da28-4c09-9c6e-f665982ea911" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282565 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="82d71aae-da28-4c09-9c6e-f665982ea911" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: E1126 17:08:01.282610 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf8161da-57ca-4a25-928d-ec41f12b6916" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282619 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf8161da-57ca-4a25-928d-ec41f12b6916" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: E1126 17:08:01.282631 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1370ec66-0402-4b67-acf3-ddeb0c734107" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282639 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1370ec66-0402-4b67-acf3-ddeb0c734107" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: E1126 17:08:01.282658 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56a19aa9-1344-4742-9705-0dc8191f47a5" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282665 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="56a19aa9-1344-4742-9705-0dc8191f47a5" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: E1126 17:08:01.282682 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282724 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: E1126 17:08:01.282735 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47d38398-693a-429e-9941-aff2dd54a904" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282742 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="47d38398-693a-429e-9941-aff2dd54a904" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282962 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="56a19aa9-1344-4742-9705-0dc8191f47a5" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282979 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.282997 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf8161da-57ca-4a25-928d-ec41f12b6916" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.283012 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="82d71aae-da28-4c09-9c6e-f665982ea911" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.283025 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1370ec66-0402-4b67-acf3-ddeb0c734107" containerName="mariadb-database-create" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.283043 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="47d38398-693a-429e-9941-aff2dd54a904" containerName="mariadb-account-create-update" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.283697 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.287011 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.287303 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-84zdm" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.287548 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.307424 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-r8hq8"] Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.375114 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-config-data\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.375264 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.375462 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwzb7\" (UniqueName: \"kubernetes.io/projected/5dd72f90-6373-4087-9121-8843150bd264-kube-api-access-dwzb7\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.375757 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-scripts\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.477313 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwzb7\" (UniqueName: \"kubernetes.io/projected/5dd72f90-6373-4087-9121-8843150bd264-kube-api-access-dwzb7\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.477430 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-scripts\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.477478 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-config-data\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.477522 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.484087 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.484161 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-config-data\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.485231 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-scripts\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.503503 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwzb7\" (UniqueName: \"kubernetes.io/projected/5dd72f90-6373-4087-9121-8843150bd264-kube-api-access-dwzb7\") pod \"nova-cell0-conductor-db-sync-r8hq8\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.626660 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.708216 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.709034 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:08:01 crc kubenswrapper[5010]: I1126 17:08:01.771584 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:08:02 crc kubenswrapper[5010]: I1126 17:08:02.100686 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-r8hq8"] Nov 26 17:08:02 crc kubenswrapper[5010]: W1126 17:08:02.104041 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5dd72f90_6373_4087_9121_8843150bd264.slice/crio-a66c3bfdd158f4b207e2cd801562dfc37584a90a7274f6c43561bc5487367156 WatchSource:0}: Error finding container a66c3bfdd158f4b207e2cd801562dfc37584a90a7274f6c43561bc5487367156: Status 404 returned error can't find the container with id a66c3bfdd158f4b207e2cd801562dfc37584a90a7274f6c43561bc5487367156 Nov 26 17:08:02 crc kubenswrapper[5010]: I1126 17:08:02.144633 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" event={"ID":"5dd72f90-6373-4087-9121-8843150bd264","Type":"ContainerStarted","Data":"a66c3bfdd158f4b207e2cd801562dfc37584a90a7274f6c43561bc5487367156"} Nov 26 17:08:02 crc kubenswrapper[5010]: I1126 17:08:02.194387 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:08:02 crc kubenswrapper[5010]: I1126 17:08:02.244223 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sg52c"] Nov 26 17:08:03 crc kubenswrapper[5010]: I1126 17:08:03.160310 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" event={"ID":"5dd72f90-6373-4087-9121-8843150bd264","Type":"ContainerStarted","Data":"b47c459ed43cacd884e84c4d1627087ba95e615cbe67afa829e6809a50ddb406"} Nov 26 17:08:03 crc kubenswrapper[5010]: I1126 17:08:03.184948 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" podStartSLOduration=2.184923521 podStartE2EDuration="2.184923521s" podCreationTimestamp="2025-11-26 17:08:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:03.179216549 +0000 UTC m=+6103.969933737" watchObservedRunningTime="2025-11-26 17:08:03.184923521 +0000 UTC m=+6103.975640689" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.169313 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sg52c" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="registry-server" containerID="cri-o://a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee" gracePeriod=2 Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.592500 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.741060 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-catalog-content\") pod \"ed597718-41fc-4a31-98a6-e5e023a968ef\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.741748 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98v26\" (UniqueName: \"kubernetes.io/projected/ed597718-41fc-4a31-98a6-e5e023a968ef-kube-api-access-98v26\") pod \"ed597718-41fc-4a31-98a6-e5e023a968ef\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.741956 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-utilities\") pod \"ed597718-41fc-4a31-98a6-e5e023a968ef\" (UID: \"ed597718-41fc-4a31-98a6-e5e023a968ef\") " Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.743510 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-utilities" (OuterVolumeSpecName: "utilities") pod "ed597718-41fc-4a31-98a6-e5e023a968ef" (UID: "ed597718-41fc-4a31-98a6-e5e023a968ef"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.750602 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed597718-41fc-4a31-98a6-e5e023a968ef-kube-api-access-98v26" (OuterVolumeSpecName: "kube-api-access-98v26") pod "ed597718-41fc-4a31-98a6-e5e023a968ef" (UID: "ed597718-41fc-4a31-98a6-e5e023a968ef"). InnerVolumeSpecName "kube-api-access-98v26". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.834450 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ed597718-41fc-4a31-98a6-e5e023a968ef" (UID: "ed597718-41fc-4a31-98a6-e5e023a968ef"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.844970 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.845007 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98v26\" (UniqueName: \"kubernetes.io/projected/ed597718-41fc-4a31-98a6-e5e023a968ef-kube-api-access-98v26\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:04 crc kubenswrapper[5010]: I1126 17:08:04.845020 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ed597718-41fc-4a31-98a6-e5e023a968ef-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.178637 5010 generic.go:334] "Generic (PLEG): container finished" podID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerID="a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee" exitCode=0 Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.178739 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerDied","Data":"a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee"} Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.179736 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sg52c" event={"ID":"ed597718-41fc-4a31-98a6-e5e023a968ef","Type":"ContainerDied","Data":"b1b9deba9b0594ab4693eae0ce3bd1b40bbedb9ba6b677aaf414cbc65740caa4"} Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.178763 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sg52c" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.179873 5010 scope.go:117] "RemoveContainer" containerID="a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.204555 5010 scope.go:117] "RemoveContainer" containerID="20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.232665 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sg52c"] Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.245764 5010 scope.go:117] "RemoveContainer" containerID="feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.249486 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sg52c"] Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.284057 5010 scope.go:117] "RemoveContainer" containerID="a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee" Nov 26 17:08:05 crc kubenswrapper[5010]: E1126 17:08:05.284500 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee\": container with ID starting with a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee not found: ID does not exist" containerID="a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.284549 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee"} err="failed to get container status \"a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee\": rpc error: code = NotFound desc = could not find container \"a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee\": container with ID starting with a3bab556b4a09e1ced1031d10f96fe3b2ad21192a74fda618386e021c35069ee not found: ID does not exist" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.284579 5010 scope.go:117] "RemoveContainer" containerID="20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24" Nov 26 17:08:05 crc kubenswrapper[5010]: E1126 17:08:05.285030 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24\": container with ID starting with 20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24 not found: ID does not exist" containerID="20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.285061 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24"} err="failed to get container status \"20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24\": rpc error: code = NotFound desc = could not find container \"20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24\": container with ID starting with 20d041013db95089cdcec196fcfa42e07bd7b49d53f15f70ef3bfadfd53fea24 not found: ID does not exist" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.285083 5010 scope.go:117] "RemoveContainer" 
containerID="feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299" Nov 26 17:08:05 crc kubenswrapper[5010]: E1126 17:08:05.285316 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299\": container with ID starting with feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299 not found: ID does not exist" containerID="feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.285339 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299"} err="failed to get container status \"feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299\": rpc error: code = NotFound desc = could not find container \"feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299\": container with ID starting with feb9f995e30de96608a53843dfcab0e102fb79f709dfd95029d1a554b9247299 not found: ID does not exist" Nov 26 17:08:05 crc kubenswrapper[5010]: I1126 17:08:05.904542 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" path="/var/lib/kubelet/pods/ed597718-41fc-4a31-98a6-e5e023a968ef/volumes" Nov 26 17:08:08 crc kubenswrapper[5010]: I1126 17:08:08.215034 5010 generic.go:334] "Generic (PLEG): container finished" podID="5dd72f90-6373-4087-9121-8843150bd264" containerID="b47c459ed43cacd884e84c4d1627087ba95e615cbe67afa829e6809a50ddb406" exitCode=0 Nov 26 17:08:08 crc kubenswrapper[5010]: I1126 17:08:08.215084 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" event={"ID":"5dd72f90-6373-4087-9121-8843150bd264","Type":"ContainerDied","Data":"b47c459ed43cacd884e84c4d1627087ba95e615cbe67afa829e6809a50ddb406"} Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.630742 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.759184 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwzb7\" (UniqueName: \"kubernetes.io/projected/5dd72f90-6373-4087-9121-8843150bd264-kube-api-access-dwzb7\") pod \"5dd72f90-6373-4087-9121-8843150bd264\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.759431 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-config-data\") pod \"5dd72f90-6373-4087-9121-8843150bd264\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.759620 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-combined-ca-bundle\") pod \"5dd72f90-6373-4087-9121-8843150bd264\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.759702 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-scripts\") pod \"5dd72f90-6373-4087-9121-8843150bd264\" (UID: \"5dd72f90-6373-4087-9121-8843150bd264\") " Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.769048 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-scripts" (OuterVolumeSpecName: "scripts") pod "5dd72f90-6373-4087-9121-8843150bd264" (UID: "5dd72f90-6373-4087-9121-8843150bd264"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.769084 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dd72f90-6373-4087-9121-8843150bd264-kube-api-access-dwzb7" (OuterVolumeSpecName: "kube-api-access-dwzb7") pod "5dd72f90-6373-4087-9121-8843150bd264" (UID: "5dd72f90-6373-4087-9121-8843150bd264"). InnerVolumeSpecName "kube-api-access-dwzb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.794912 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5dd72f90-6373-4087-9121-8843150bd264" (UID: "5dd72f90-6373-4087-9121-8843150bd264"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.805865 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-config-data" (OuterVolumeSpecName: "config-data") pod "5dd72f90-6373-4087-9121-8843150bd264" (UID: "5dd72f90-6373-4087-9121-8843150bd264"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.863373 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.863440 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.863451 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwzb7\" (UniqueName: \"kubernetes.io/projected/5dd72f90-6373-4087-9121-8843150bd264-kube-api-access-dwzb7\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:09 crc kubenswrapper[5010]: I1126 17:08:09.863466 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dd72f90-6373-4087-9121-8843150bd264-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.241824 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" event={"ID":"5dd72f90-6373-4087-9121-8843150bd264","Type":"ContainerDied","Data":"a66c3bfdd158f4b207e2cd801562dfc37584a90a7274f6c43561bc5487367156"} Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.241863 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a66c3bfdd158f4b207e2cd801562dfc37584a90a7274f6c43561bc5487367156" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.241879 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-r8hq8" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.349140 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 17:08:10 crc kubenswrapper[5010]: E1126 17:08:10.349785 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dd72f90-6373-4087-9121-8843150bd264" containerName="nova-cell0-conductor-db-sync" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.349803 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dd72f90-6373-4087-9121-8843150bd264" containerName="nova-cell0-conductor-db-sync" Nov 26 17:08:10 crc kubenswrapper[5010]: E1126 17:08:10.349834 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="extract-utilities" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.349842 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="extract-utilities" Nov 26 17:08:10 crc kubenswrapper[5010]: E1126 17:08:10.349856 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="registry-server" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.349862 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="registry-server" Nov 26 17:08:10 crc kubenswrapper[5010]: E1126 17:08:10.349882 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="extract-content" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.349888 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="extract-content" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.350084 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dd72f90-6373-4087-9121-8843150bd264" containerName="nova-cell0-conductor-db-sync" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.350104 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed597718-41fc-4a31-98a6-e5e023a968ef" containerName="registry-server" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.350650 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.355090 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.355735 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-84zdm" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.378426 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.475139 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjtr2\" (UniqueName: \"kubernetes.io/projected/47e9d252-9ba4-4d0d-9376-5e55278708b6-kube-api-access-zjtr2\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.475214 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.475279 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.576798 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjtr2\" (UniqueName: \"kubernetes.io/projected/47e9d252-9ba4-4d0d-9376-5e55278708b6-kube-api-access-zjtr2\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.576916 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.577172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc 
kubenswrapper[5010]: I1126 17:08:10.582743 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.588635 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.604189 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjtr2\" (UniqueName: \"kubernetes.io/projected/47e9d252-9ba4-4d0d-9376-5e55278708b6-kube-api-access-zjtr2\") pod \"nova-cell0-conductor-0\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:10 crc kubenswrapper[5010]: I1126 17:08:10.669093 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:11 crc kubenswrapper[5010]: I1126 17:08:11.221271 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 17:08:11 crc kubenswrapper[5010]: I1126 17:08:11.255005 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"47e9d252-9ba4-4d0d-9376-5e55278708b6","Type":"ContainerStarted","Data":"162868e448866d13d79d9cd534fe3d245caa9e03bcc08032c6856292f68dea27"} Nov 26 17:08:11 crc kubenswrapper[5010]: I1126 17:08:11.422980 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:08:11 crc kubenswrapper[5010]: I1126 17:08:11.423036 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:08:12 crc kubenswrapper[5010]: I1126 17:08:12.265260 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"47e9d252-9ba4-4d0d-9376-5e55278708b6","Type":"ContainerStarted","Data":"c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de"} Nov 26 17:08:12 crc kubenswrapper[5010]: I1126 17:08:12.265601 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:12 crc kubenswrapper[5010]: I1126 17:08:12.287811 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.287789368 podStartE2EDuration="2.287789368s" podCreationTimestamp="2025-11-26 17:08:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:12.284949797 +0000 UTC m=+6113.075666945" watchObservedRunningTime="2025-11-26 17:08:12.287789368 +0000 UTC m=+6113.078506526" Nov 26 17:08:20 crc 
kubenswrapper[5010]: I1126 17:08:20.714503 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.203049 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-5cwcg"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.204739 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.209391 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.209735 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.217590 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-5cwcg"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.310197 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-config-data\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.310368 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.310404 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52lrq\" (UniqueName: \"kubernetes.io/projected/8775a1b7-7530-4163-9d34-b435a78fe316-kube-api-access-52lrq\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.310635 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-scripts\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.368832 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.369963 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.372984 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.393626 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.412679 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-config-data\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.412736 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.412762 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52lrq\" (UniqueName: \"kubernetes.io/projected/8775a1b7-7530-4163-9d34-b435a78fe316-kube-api-access-52lrq\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.412866 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-scripts\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.420357 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.421582 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-config-data\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.429647 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-scripts\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.451214 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52lrq\" (UniqueName: \"kubernetes.io/projected/8775a1b7-7530-4163-9d34-b435a78fe316-kube-api-access-52lrq\") pod \"nova-cell0-cell-mapping-5cwcg\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.460957 5010 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.462449 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.466502 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.507966 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.509509 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.512525 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.515040 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-config-data\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.519626 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.519962 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhchj\" (UniqueName: \"kubernetes.io/projected/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-kube-api-access-nhchj\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.518929 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.535025 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.595554 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.623753 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624030 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-config-data\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624137 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-config-data\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624228 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhchj\" (UniqueName: \"kubernetes.io/projected/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-kube-api-access-nhchj\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624305 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6651ea75-f827-42e7-950d-6c5059e7f4ff-logs\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624402 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcmbx\" (UniqueName: \"kubernetes.io/projected/6651ea75-f827-42e7-950d-6c5059e7f4ff-kube-api-access-lcmbx\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624475 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzkf2\" (UniqueName: \"kubernetes.io/projected/b2e68d75-b2e2-4e94-9d8d-630c64f144af-kube-api-access-gzkf2\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624577 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2e68d75-b2e2-4e94-9d8d-630c64f144af-logs\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624653 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: 
\"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624761 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-config-data\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.624836 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.642394 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.643916 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-config-data\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.671961 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhchj\" (UniqueName: \"kubernetes.io/projected/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-kube-api-access-nhchj\") pod \"nova-scheduler-0\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.688043 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-ddc4c876c-vrnsj"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.688916 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.689982 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736028 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736413 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-config-data\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736471 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-config-data\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736672 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6651ea75-f827-42e7-950d-6c5059e7f4ff-logs\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736738 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcmbx\" (UniqueName: \"kubernetes.io/projected/6651ea75-f827-42e7-950d-6c5059e7f4ff-kube-api-access-lcmbx\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736758 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzkf2\" (UniqueName: \"kubernetes.io/projected/b2e68d75-b2e2-4e94-9d8d-630c64f144af-kube-api-access-gzkf2\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736822 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2e68d75-b2e2-4e94-9d8d-630c64f144af-logs\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.736846 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.738252 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6651ea75-f827-42e7-950d-6c5059e7f4ff-logs\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.741045 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2e68d75-b2e2-4e94-9d8d-630c64f144af-logs\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " 
pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.748682 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-config-data\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.749528 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.753424 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-config-data\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.756556 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.796099 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.799307 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.809339 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcmbx\" (UniqueName: \"kubernetes.io/projected/6651ea75-f827-42e7-950d-6c5059e7f4ff-kube-api-access-lcmbx\") pod \"nova-metadata-0\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.810078 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.819697 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzkf2\" (UniqueName: \"kubernetes.io/projected/b2e68d75-b2e2-4e94-9d8d-630c64f144af-kube-api-access-gzkf2\") pod \"nova-api-0\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.839343 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-sb\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.839542 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-dns-svc\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.839572 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-config\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.839670 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-248zx\" (UniqueName: \"kubernetes.io/projected/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-kube-api-access-248zx\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.839696 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-nb\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.845799 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-ddc4c876c-vrnsj"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.846962 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.873918 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.929574 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.946852 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cx8b\" (UniqueName: \"kubernetes.io/projected/d0fe1429-41a8-40b0-a49f-650f99904126-kube-api-access-2cx8b\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947015 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-dns-svc\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947080 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-config\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947308 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947367 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-248zx\" (UniqueName: 
\"kubernetes.io/projected/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-kube-api-access-248zx\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947400 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-nb\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947462 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-sb\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.947537 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.948428 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-nb\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.948547 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-config\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.949303 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-sb\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.949643 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-dns-svc\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:21 crc kubenswrapper[5010]: I1126 17:08:21.997249 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-248zx\" (UniqueName: \"kubernetes.io/projected/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-kube-api-access-248zx\") pod \"dnsmasq-dns-ddc4c876c-vrnsj\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.048874 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cx8b\" (UniqueName: \"kubernetes.io/projected/d0fe1429-41a8-40b0-a49f-650f99904126-kube-api-access-2cx8b\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.048966 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.049044 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.052320 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.061223 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.068166 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cx8b\" (UniqueName: \"kubernetes.io/projected/d0fe1429-41a8-40b0-a49f-650f99904126-kube-api-access-2cx8b\") pod \"nova-cell1-novncproxy-0\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.090432 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.173739 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.403337 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-5cwcg"] Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.460152 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-j2nzv"] Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.461431 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.468459 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.468771 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.483186 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-j2nzv"] Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.525786 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.532994 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:08:22 crc kubenswrapper[5010]: W1126 17:08:22.542300 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb2e68d75_b2e2_4e94_9d8d_630c64f144af.slice/crio-6e3ccd2183b9a66bded65d735d7ef55869c9c74d3a504cda0fb347dda77c574b WatchSource:0}: Error finding container 6e3ccd2183b9a66bded65d735d7ef55869c9c74d3a504cda0fb347dda77c574b: Status 404 returned error can't find the container with id 6e3ccd2183b9a66bded65d735d7ef55869c9c74d3a504cda0fb347dda77c574b Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.553049 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:22 crc kubenswrapper[5010]: W1126 17:08:22.554347 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6651ea75_f827_42e7_950d_6c5059e7f4ff.slice/crio-1555489a505e17a5023307053baf105d1817bf69e536aaaa22a6a2621a68e8f9 WatchSource:0}: Error finding container 1555489a505e17a5023307053baf105d1817bf69e536aaaa22a6a2621a68e8f9: Status 404 returned error can't find the container with id 1555489a505e17a5023307053baf105d1817bf69e536aaaa22a6a2621a68e8f9 Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.560258 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrc5p\" (UniqueName: \"kubernetes.io/projected/e0c7721f-be82-4859-874e-8e73cad59726-kube-api-access-rrc5p\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.560360 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.560413 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-config-data\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.560516 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-scripts\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.662032 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-config-data\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.662096 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-scripts\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.662375 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrc5p\" (UniqueName: \"kubernetes.io/projected/e0c7721f-be82-4859-874e-8e73cad59726-kube-api-access-rrc5p\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.662462 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.666990 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.667136 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-config-data\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.669827 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-scripts\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.679574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrc5p\" (UniqueName: \"kubernetes.io/projected/e0c7721f-be82-4859-874e-8e73cad59726-kube-api-access-rrc5p\") pod \"nova-cell1-conductor-db-sync-j2nzv\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.719505 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.745200 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:22 crc kubenswrapper[5010]: I1126 17:08:22.776251 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-ddc4c876c-vrnsj"] Nov 26 17:08:22 crc kubenswrapper[5010]: W1126 17:08:22.781992 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda14ea7c7_8798_4809_bc62_87e9bc5a2e5e.slice/crio-4b6474994318eeccb3b23658bff61e1d851e96136275acc79662a5b6fc4080d4 WatchSource:0}: Error finding container 4b6474994318eeccb3b23658bff61e1d851e96136275acc79662a5b6fc4080d4: Status 404 returned error can't find the container with id 4b6474994318eeccb3b23658bff61e1d851e96136275acc79662a5b6fc4080d4 Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.336128 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-j2nzv"] Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.408797 5010 generic.go:334] "Generic (PLEG): container finished" podID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerID="73635e4fa68e1d7e76456bcd981ed5cd020f58a9595d23de8c7b72c0c21b87f0" exitCode=0 Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.409252 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" event={"ID":"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e","Type":"ContainerDied","Data":"73635e4fa68e1d7e76456bcd981ed5cd020f58a9595d23de8c7b72c0c21b87f0"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.409288 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" event={"ID":"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e","Type":"ContainerStarted","Data":"4b6474994318eeccb3b23658bff61e1d851e96136275acc79662a5b6fc4080d4"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.491082 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6651ea75-f827-42e7-950d-6c5059e7f4ff","Type":"ContainerStarted","Data":"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.491128 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6651ea75-f827-42e7-950d-6c5059e7f4ff","Type":"ContainerStarted","Data":"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.491139 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6651ea75-f827-42e7-950d-6c5059e7f4ff","Type":"ContainerStarted","Data":"1555489a505e17a5023307053baf105d1817bf69e536aaaa22a6a2621a68e8f9"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.520270 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d0f040c3-c2dd-4450-b3c9-934374ebaf3e","Type":"ContainerStarted","Data":"10b48c5486e07bdd2361e652cc81d7854e0a6c930b735c4748ef3039b454a796"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.520322 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d0f040c3-c2dd-4450-b3c9-934374ebaf3e","Type":"ContainerStarted","Data":"b206111797e7ae541b35b53306b9352c84f4d8b9649c353f8ef4592cdfd5a39e"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 
17:08:23.538476 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.538459649 podStartE2EDuration="2.538459649s" podCreationTimestamp="2025-11-26 17:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:23.520498082 +0000 UTC m=+6124.311215230" watchObservedRunningTime="2025-11-26 17:08:23.538459649 +0000 UTC m=+6124.329176797" Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.558942 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d0fe1429-41a8-40b0-a49f-650f99904126","Type":"ContainerStarted","Data":"bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.558986 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d0fe1429-41a8-40b0-a49f-650f99904126","Type":"ContainerStarted","Data":"bef30882738adade3d12f16dd09ba2d28e05bc3866b441d92b6ec624f475d306"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.564246 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5cwcg" event={"ID":"8775a1b7-7530-4163-9d34-b435a78fe316","Type":"ContainerStarted","Data":"249f63b4814641be5bd617f37a60928ea705cc670db8e0ace0565b513c18c138"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.564327 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5cwcg" event={"ID":"8775a1b7-7530-4163-9d34-b435a78fe316","Type":"ContainerStarted","Data":"7363d699fdbe68a6079a71cfbb3b5645a7b7e4fe981e412b7d6a4c07a238c44c"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.577792 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b2e68d75-b2e2-4e94-9d8d-630c64f144af","Type":"ContainerStarted","Data":"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.577833 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b2e68d75-b2e2-4e94-9d8d-630c64f144af","Type":"ContainerStarted","Data":"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.577845 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b2e68d75-b2e2-4e94-9d8d-630c64f144af","Type":"ContainerStarted","Data":"6e3ccd2183b9a66bded65d735d7ef55869c9c74d3a504cda0fb347dda77c574b"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.581145 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" event={"ID":"e0c7721f-be82-4859-874e-8e73cad59726","Type":"ContainerStarted","Data":"4b265b1744e308211a3c834619609e9aee69052f98b42fd277bd6827f2206ceb"} Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.597127 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.597106967 podStartE2EDuration="2.597106967s" podCreationTimestamp="2025-11-26 17:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:23.558831625 +0000 UTC m=+6124.349548773" watchObservedRunningTime="2025-11-26 17:08:23.597106967 +0000 UTC m=+6124.387824115" Nov 26 17:08:23 crc 
kubenswrapper[5010]: I1126 17:08:23.611883 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.6118641240000002 podStartE2EDuration="2.611864124s" podCreationTimestamp="2025-11-26 17:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:23.585627602 +0000 UTC m=+6124.376344770" watchObservedRunningTime="2025-11-26 17:08:23.611864124 +0000 UTC m=+6124.402581282" Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.627231 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-5cwcg" podStartSLOduration=2.627212736 podStartE2EDuration="2.627212736s" podCreationTimestamp="2025-11-26 17:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:23.610766377 +0000 UTC m=+6124.401483555" watchObservedRunningTime="2025-11-26 17:08:23.627212736 +0000 UTC m=+6124.417929884" Nov 26 17:08:23 crc kubenswrapper[5010]: I1126 17:08:23.650130 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.650111005 podStartE2EDuration="2.650111005s" podCreationTimestamp="2025-11-26 17:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:23.639317387 +0000 UTC m=+6124.430034545" watchObservedRunningTime="2025-11-26 17:08:23.650111005 +0000 UTC m=+6124.440828143" Nov 26 17:08:24 crc kubenswrapper[5010]: I1126 17:08:24.592178 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" event={"ID":"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e","Type":"ContainerStarted","Data":"b22bbacce62f1ef27309c00b0e5589c6fcc2a2b403c687041c17075169e515f8"} Nov 26 17:08:24 crc kubenswrapper[5010]: I1126 17:08:24.592507 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:24 crc kubenswrapper[5010]: I1126 17:08:24.593687 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" event={"ID":"e0c7721f-be82-4859-874e-8e73cad59726","Type":"ContainerStarted","Data":"90b73614f3185e6d610275cdef4e34f2cc5840700c20a450255d47989c55315f"} Nov 26 17:08:24 crc kubenswrapper[5010]: I1126 17:08:24.613047 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" podStartSLOduration=3.613024838 podStartE2EDuration="3.613024838s" podCreationTimestamp="2025-11-26 17:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:24.612975626 +0000 UTC m=+6125.403692784" watchObservedRunningTime="2025-11-26 17:08:24.613024838 +0000 UTC m=+6125.403741986" Nov 26 17:08:24 crc kubenswrapper[5010]: I1126 17:08:24.637316 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" podStartSLOduration=2.637293251 podStartE2EDuration="2.637293251s" podCreationTimestamp="2025-11-26 17:08:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:24.626598355 +0000 UTC m=+6125.417315513" 
watchObservedRunningTime="2025-11-26 17:08:24.637293251 +0000 UTC m=+6125.428010409" Nov 26 17:08:25 crc kubenswrapper[5010]: I1126 17:08:25.660495 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:25 crc kubenswrapper[5010]: I1126 17:08:25.660691 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="d0fe1429-41a8-40b0-a49f-650f99904126" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c" gracePeriod=30 Nov 26 17:08:25 crc kubenswrapper[5010]: I1126 17:08:25.685952 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:25 crc kubenswrapper[5010]: I1126 17:08:25.686138 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-log" containerID="cri-o://fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace" gracePeriod=30 Nov 26 17:08:25 crc kubenswrapper[5010]: I1126 17:08:25.686520 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-metadata" containerID="cri-o://b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c" gracePeriod=30 Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.357696 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.407411 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-combined-ca-bundle\") pod \"6651ea75-f827-42e7-950d-6c5059e7f4ff\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.407548 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-config-data\") pod \"6651ea75-f827-42e7-950d-6c5059e7f4ff\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.407581 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcmbx\" (UniqueName: \"kubernetes.io/projected/6651ea75-f827-42e7-950d-6c5059e7f4ff-kube-api-access-lcmbx\") pod \"6651ea75-f827-42e7-950d-6c5059e7f4ff\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.407651 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6651ea75-f827-42e7-950d-6c5059e7f4ff-logs\") pod \"6651ea75-f827-42e7-950d-6c5059e7f4ff\" (UID: \"6651ea75-f827-42e7-950d-6c5059e7f4ff\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.411838 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6651ea75-f827-42e7-950d-6c5059e7f4ff-logs" (OuterVolumeSpecName: "logs") pod "6651ea75-f827-42e7-950d-6c5059e7f4ff" (UID: "6651ea75-f827-42e7-950d-6c5059e7f4ff"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.436850 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6651ea75-f827-42e7-950d-6c5059e7f4ff-kube-api-access-lcmbx" (OuterVolumeSpecName: "kube-api-access-lcmbx") pod "6651ea75-f827-42e7-950d-6c5059e7f4ff" (UID: "6651ea75-f827-42e7-950d-6c5059e7f4ff"). InnerVolumeSpecName "kube-api-access-lcmbx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.475181 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6651ea75-f827-42e7-950d-6c5059e7f4ff" (UID: "6651ea75-f827-42e7-950d-6c5059e7f4ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.479185 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-config-data" (OuterVolumeSpecName: "config-data") pod "6651ea75-f827-42e7-950d-6c5059e7f4ff" (UID: "6651ea75-f827-42e7-950d-6c5059e7f4ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.509972 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6651ea75-f827-42e7-950d-6c5059e7f4ff-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.510016 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.510033 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6651ea75-f827-42e7-950d-6c5059e7f4ff-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.510046 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcmbx\" (UniqueName: \"kubernetes.io/projected/6651ea75-f827-42e7-950d-6c5059e7f4ff-kube-api-access-lcmbx\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.521195 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.611114 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-combined-ca-bundle\") pod \"d0fe1429-41a8-40b0-a49f-650f99904126\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.611195 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cx8b\" (UniqueName: \"kubernetes.io/projected/d0fe1429-41a8-40b0-a49f-650f99904126-kube-api-access-2cx8b\") pod \"d0fe1429-41a8-40b0-a49f-650f99904126\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.611232 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-config-data\") pod \"d0fe1429-41a8-40b0-a49f-650f99904126\" (UID: \"d0fe1429-41a8-40b0-a49f-650f99904126\") " Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.612611 5010 generic.go:334] "Generic (PLEG): container finished" podID="d0fe1429-41a8-40b0-a49f-650f99904126" containerID="bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c" exitCode=0 Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.612730 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d0fe1429-41a8-40b0-a49f-650f99904126","Type":"ContainerDied","Data":"bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c"} Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.612762 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d0fe1429-41a8-40b0-a49f-650f99904126","Type":"ContainerDied","Data":"bef30882738adade3d12f16dd09ba2d28e05bc3866b441d92b6ec624f475d306"} Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.612782 5010 scope.go:117] "RemoveContainer" containerID="bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.612943 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.617008 5010 generic.go:334] "Generic (PLEG): container finished" podID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerID="b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c" exitCode=0 Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.617043 5010 generic.go:334] "Generic (PLEG): container finished" podID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerID="fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace" exitCode=143 Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.617067 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6651ea75-f827-42e7-950d-6c5059e7f4ff","Type":"ContainerDied","Data":"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c"} Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.617097 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6651ea75-f827-42e7-950d-6c5059e7f4ff","Type":"ContainerDied","Data":"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace"} Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.617110 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"6651ea75-f827-42e7-950d-6c5059e7f4ff","Type":"ContainerDied","Data":"1555489a505e17a5023307053baf105d1817bf69e536aaaa22a6a2621a68e8f9"} Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.617125 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.618008 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0fe1429-41a8-40b0-a49f-650f99904126-kube-api-access-2cx8b" (OuterVolumeSpecName: "kube-api-access-2cx8b") pod "d0fe1429-41a8-40b0-a49f-650f99904126" (UID: "d0fe1429-41a8-40b0-a49f-650f99904126"). InnerVolumeSpecName "kube-api-access-2cx8b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.652841 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0fe1429-41a8-40b0-a49f-650f99904126" (UID: "d0fe1429-41a8-40b0-a49f-650f99904126"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.653177 5010 scope.go:117] "RemoveContainer" containerID="bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c" Nov 26 17:08:26 crc kubenswrapper[5010]: E1126 17:08:26.653737 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c\": container with ID starting with bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c not found: ID does not exist" containerID="bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.653776 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c"} err="failed to get container status \"bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c\": rpc error: code = NotFound desc = could not find container \"bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c\": container with ID starting with bfd8c5c37513b99ee47b03a6b535f46e6e86148e1f3fa284fee2e9ad28536f7c not found: ID does not exist" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.653803 5010 scope.go:117] "RemoveContainer" containerID="b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.679976 5010 scope.go:117] "RemoveContainer" containerID="fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.686092 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.689192 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.690373 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-config-data" (OuterVolumeSpecName: "config-data") pod "d0fe1429-41a8-40b0-a49f-650f99904126" (UID: "d0fe1429-41a8-40b0-a49f-650f99904126"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.704894 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.716612 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.716651 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cx8b\" (UniqueName: \"kubernetes.io/projected/d0fe1429-41a8-40b0-a49f-650f99904126-kube-api-access-2cx8b\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.716665 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0fe1429-41a8-40b0-a49f-650f99904126-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.722374 5010 scope.go:117] "RemoveContainer" containerID="b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c" Nov 26 17:08:26 crc kubenswrapper[5010]: E1126 17:08:26.727854 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c\": container with ID starting with b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c not found: ID does not exist" containerID="b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.727911 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c"} err="failed to get container status \"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c\": rpc error: code = NotFound desc = could not find container \"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c\": container with ID starting with b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c not found: ID does not exist" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.727945 5010 scope.go:117] "RemoveContainer" containerID="fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace" Nov 26 17:08:26 crc kubenswrapper[5010]: E1126 17:08:26.728508 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace\": container with ID starting with fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace not found: ID does not exist" containerID="fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.728555 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace"} err="failed to get container status \"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace\": rpc error: code = NotFound desc = could not find container \"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace\": container with ID starting with fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace not found: ID does not exist" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.728585 
5010 scope.go:117] "RemoveContainer" containerID="b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.729014 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c"} err="failed to get container status \"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c\": rpc error: code = NotFound desc = could not find container \"b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c\": container with ID starting with b64df3ec57c7f5c6c57ab153421bdf4d47fa3a8b4fecd325e5cfb06b027bc03c not found: ID does not exist" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.729040 5010 scope.go:117] "RemoveContainer" containerID="fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.729353 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace"} err="failed to get container status \"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace\": rpc error: code = NotFound desc = could not find container \"fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace\": container with ID starting with fa65668b41caf8ee448e4442e395aaf140634d979e17fb1e786903166c6faace not found: ID does not exist" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.729663 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:26 crc kubenswrapper[5010]: E1126 17:08:26.730096 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-log" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.730107 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-log" Nov 26 17:08:26 crc kubenswrapper[5010]: E1126 17:08:26.730138 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0fe1429-41a8-40b0-a49f-650f99904126" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.730144 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0fe1429-41a8-40b0-a49f-650f99904126" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 17:08:26 crc kubenswrapper[5010]: E1126 17:08:26.730167 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-metadata" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.730172 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-metadata" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.730357 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-log" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.730380 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" containerName="nova-metadata-metadata" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.730394 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0fe1429-41a8-40b0-a49f-650f99904126" containerName="nova-cell1-novncproxy-novncproxy" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 
17:08:26.731749 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.734607 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.734964 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.738588 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.817904 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-config-data\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.818115 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.818174 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lcds7\" (UniqueName: \"kubernetes.io/projected/60e75d21-83b9-439d-b515-7d176bbb5a52-kube-api-access-lcds7\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.818234 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.818537 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60e75d21-83b9-439d-b515-7d176bbb5a52-logs\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.920106 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-config-data\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.920223 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.920858 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lcds7\" (UniqueName: \"kubernetes.io/projected/60e75d21-83b9-439d-b515-7d176bbb5a52-kube-api-access-lcds7\") pod 
\"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.920960 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.921608 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60e75d21-83b9-439d-b515-7d176bbb5a52-logs\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.922162 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60e75d21-83b9-439d-b515-7d176bbb5a52-logs\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.923586 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.924112 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.924151 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-config-data\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:26 crc kubenswrapper[5010]: I1126 17:08:26.944030 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lcds7\" (UniqueName: \"kubernetes.io/projected/60e75d21-83b9-439d-b515-7d176bbb5a52-kube-api-access-lcds7\") pod \"nova-metadata-0\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " pod="openstack/nova-metadata-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.051396 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.118492 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.146787 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.173509 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.174965 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.179106 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.179369 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.179930 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.185492 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.233704 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7579k\" (UniqueName: \"kubernetes.io/projected/43b78155-fd04-4435-a32a-21cc639a234a-kube-api-access-7579k\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.233793 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.233845 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.233979 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.234019 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.335922 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.336072 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.336104 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.336151 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7579k\" (UniqueName: \"kubernetes.io/projected/43b78155-fd04-4435-a32a-21cc639a234a-kube-api-access-7579k\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.336182 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.341641 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.343128 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.345732 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.349311 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43b78155-fd04-4435-a32a-21cc639a234a-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.361411 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7579k\" (UniqueName: \"kubernetes.io/projected/43b78155-fd04-4435-a32a-21cc639a234a-kube-api-access-7579k\") pod \"nova-cell1-novncproxy-0\" (UID: \"43b78155-fd04-4435-a32a-21cc639a234a\") " pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.495390 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.640694 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.641643 5010 generic.go:334] "Generic (PLEG): container finished" podID="e0c7721f-be82-4859-874e-8e73cad59726" containerID="90b73614f3185e6d610275cdef4e34f2cc5840700c20a450255d47989c55315f" exitCode=0 Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.641694 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" event={"ID":"e0c7721f-be82-4859-874e-8e73cad59726","Type":"ContainerDied","Data":"90b73614f3185e6d610275cdef4e34f2cc5840700c20a450255d47989c55315f"} Nov 26 17:08:27 crc kubenswrapper[5010]: W1126 17:08:27.647059 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod60e75d21_83b9_439d_b515_7d176bbb5a52.slice/crio-d5122e0d5336792f808db958a40e972eef1d6aae47db96050006a9d01cb21b7a WatchSource:0}: Error finding container d5122e0d5336792f808db958a40e972eef1d6aae47db96050006a9d01cb21b7a: Status 404 returned error can't find the container with id d5122e0d5336792f808db958a40e972eef1d6aae47db96050006a9d01cb21b7a Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.903449 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6651ea75-f827-42e7-950d-6c5059e7f4ff" path="/var/lib/kubelet/pods/6651ea75-f827-42e7-950d-6c5059e7f4ff/volumes" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.904342 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0fe1429-41a8-40b0-a49f-650f99904126" path="/var/lib/kubelet/pods/d0fe1429-41a8-40b0-a49f-650f99904126/volumes" Nov 26 17:08:27 crc kubenswrapper[5010]: I1126 17:08:27.956599 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 26 17:08:27 crc kubenswrapper[5010]: W1126 17:08:27.963114 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43b78155_fd04_4435_a32a_21cc639a234a.slice/crio-365b791e42fd860263f69f37b3aaf40edd2ab7314ab454adecd70be93df3b0fa WatchSource:0}: Error finding container 365b791e42fd860263f69f37b3aaf40edd2ab7314ab454adecd70be93df3b0fa: Status 404 returned error can't find the container with id 365b791e42fd860263f69f37b3aaf40edd2ab7314ab454adecd70be93df3b0fa Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.656150 5010 generic.go:334] "Generic (PLEG): container finished" podID="8775a1b7-7530-4163-9d34-b435a78fe316" containerID="249f63b4814641be5bd617f37a60928ea705cc670db8e0ace0565b513c18c138" exitCode=0 Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.656196 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5cwcg" event={"ID":"8775a1b7-7530-4163-9d34-b435a78fe316","Type":"ContainerDied","Data":"249f63b4814641be5bd617f37a60928ea705cc670db8e0ace0565b513c18c138"} Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.658326 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"43b78155-fd04-4435-a32a-21cc639a234a","Type":"ContainerStarted","Data":"80daeb4180f0448927b89cf9471a6d6d8f1ce656ef874c2fd8ddb3e613df20c1"} Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.658375 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-novncproxy-0" event={"ID":"43b78155-fd04-4435-a32a-21cc639a234a","Type":"ContainerStarted","Data":"365b791e42fd860263f69f37b3aaf40edd2ab7314ab454adecd70be93df3b0fa"} Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.661743 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"60e75d21-83b9-439d-b515-7d176bbb5a52","Type":"ContainerStarted","Data":"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c"} Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.661793 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"60e75d21-83b9-439d-b515-7d176bbb5a52","Type":"ContainerStarted","Data":"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412"} Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.661804 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"60e75d21-83b9-439d-b515-7d176bbb5a52","Type":"ContainerStarted","Data":"d5122e0d5336792f808db958a40e972eef1d6aae47db96050006a9d01cb21b7a"} Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.722271 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.722245731 podStartE2EDuration="2.722245731s" podCreationTimestamp="2025-11-26 17:08:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:28.708766376 +0000 UTC m=+6129.499483554" watchObservedRunningTime="2025-11-26 17:08:28.722245731 +0000 UTC m=+6129.512962889" Nov 26 17:08:28 crc kubenswrapper[5010]: I1126 17:08:28.740255 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.7402350979999999 podStartE2EDuration="1.740235098s" podCreationTimestamp="2025-11-26 17:08:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:28.73103596 +0000 UTC m=+6129.521753118" watchObservedRunningTime="2025-11-26 17:08:28.740235098 +0000 UTC m=+6129.530952246" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.091229 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.276401 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-combined-ca-bundle\") pod \"e0c7721f-be82-4859-874e-8e73cad59726\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.276881 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-scripts\") pod \"e0c7721f-be82-4859-874e-8e73cad59726\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.277015 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-config-data\") pod \"e0c7721f-be82-4859-874e-8e73cad59726\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.277076 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rrc5p\" (UniqueName: \"kubernetes.io/projected/e0c7721f-be82-4859-874e-8e73cad59726-kube-api-access-rrc5p\") pod \"e0c7721f-be82-4859-874e-8e73cad59726\" (UID: \"e0c7721f-be82-4859-874e-8e73cad59726\") " Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.282432 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-scripts" (OuterVolumeSpecName: "scripts") pod "e0c7721f-be82-4859-874e-8e73cad59726" (UID: "e0c7721f-be82-4859-874e-8e73cad59726"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.283391 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0c7721f-be82-4859-874e-8e73cad59726-kube-api-access-rrc5p" (OuterVolumeSpecName: "kube-api-access-rrc5p") pod "e0c7721f-be82-4859-874e-8e73cad59726" (UID: "e0c7721f-be82-4859-874e-8e73cad59726"). InnerVolumeSpecName "kube-api-access-rrc5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.312914 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-config-data" (OuterVolumeSpecName: "config-data") pod "e0c7721f-be82-4859-874e-8e73cad59726" (UID: "e0c7721f-be82-4859-874e-8e73cad59726"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.319317 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e0c7721f-be82-4859-874e-8e73cad59726" (UID: "e0c7721f-be82-4859-874e-8e73cad59726"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.379096 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.379131 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.379146 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rrc5p\" (UniqueName: \"kubernetes.io/projected/e0c7721f-be82-4859-874e-8e73cad59726-kube-api-access-rrc5p\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.379160 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e0c7721f-be82-4859-874e-8e73cad59726-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.688978 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" event={"ID":"e0c7721f-be82-4859-874e-8e73cad59726","Type":"ContainerDied","Data":"4b265b1744e308211a3c834619609e9aee69052f98b42fd277bd6827f2206ceb"} Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.689039 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b265b1744e308211a3c834619609e9aee69052f98b42fd277bd6827f2206ceb" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.689610 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-j2nzv" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.760611 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 17:08:29 crc kubenswrapper[5010]: E1126 17:08:29.761626 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0c7721f-be82-4859-874e-8e73cad59726" containerName="nova-cell1-conductor-db-sync" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.761651 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0c7721f-be82-4859-874e-8e73cad59726" containerName="nova-cell1-conductor-db-sync" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.762162 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0c7721f-be82-4859-874e-8e73cad59726" containerName="nova-cell1-conductor-db-sync" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.763216 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.769622 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.816270 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.896539 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.896695 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdbbh\" (UniqueName: \"kubernetes.io/projected/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-kube-api-access-qdbbh\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:29 crc kubenswrapper[5010]: I1126 17:08:29.896816 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:29.998898 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdbbh\" (UniqueName: \"kubernetes.io/projected/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-kube-api-access-qdbbh\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:29.998977 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:29.999100 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.006083 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.007052 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.018580 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdbbh\" (UniqueName: \"kubernetes.io/projected/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-kube-api-access-qdbbh\") pod \"nova-cell1-conductor-0\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.100322 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.102614 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.204064 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-52lrq\" (UniqueName: \"kubernetes.io/projected/8775a1b7-7530-4163-9d34-b435a78fe316-kube-api-access-52lrq\") pod \"8775a1b7-7530-4163-9d34-b435a78fe316\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.204256 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-scripts\") pod \"8775a1b7-7530-4163-9d34-b435a78fe316\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.204549 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-config-data\") pod \"8775a1b7-7530-4163-9d34-b435a78fe316\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.204825 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-combined-ca-bundle\") pod \"8775a1b7-7530-4163-9d34-b435a78fe316\" (UID: \"8775a1b7-7530-4163-9d34-b435a78fe316\") " Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.208430 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-scripts" (OuterVolumeSpecName: "scripts") pod "8775a1b7-7530-4163-9d34-b435a78fe316" (UID: "8775a1b7-7530-4163-9d34-b435a78fe316"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.208896 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8775a1b7-7530-4163-9d34-b435a78fe316-kube-api-access-52lrq" (OuterVolumeSpecName: "kube-api-access-52lrq") pod "8775a1b7-7530-4163-9d34-b435a78fe316" (UID: "8775a1b7-7530-4163-9d34-b435a78fe316"). InnerVolumeSpecName "kube-api-access-52lrq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.235157 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8775a1b7-7530-4163-9d34-b435a78fe316" (UID: "8775a1b7-7530-4163-9d34-b435a78fe316"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.235523 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-config-data" (OuterVolumeSpecName: "config-data") pod "8775a1b7-7530-4163-9d34-b435a78fe316" (UID: "8775a1b7-7530-4163-9d34-b435a78fe316"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.306988 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.307018 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-52lrq\" (UniqueName: \"kubernetes.io/projected/8775a1b7-7530-4163-9d34-b435a78fe316-kube-api-access-52lrq\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.307032 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.307043 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8775a1b7-7530-4163-9d34-b435a78fe316-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.551391 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.703920 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-5cwcg" event={"ID":"8775a1b7-7530-4163-9d34-b435a78fe316","Type":"ContainerDied","Data":"7363d699fdbe68a6079a71cfbb3b5645a7b7e4fe981e412b7d6a4c07a238c44c"} Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.704161 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7363d699fdbe68a6079a71cfbb3b5645a7b7e4fe981e412b7d6a4c07a238c44c" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.703947 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-5cwcg" Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.705282 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"bf0e2af8-bfad-41a1-af7e-5df8046f4c51","Type":"ContainerStarted","Data":"dc9e42b2b81090a123f96dc061204561bfc9e024f877f533d6318ca2ef9401c3"} Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.843958 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.844211 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="d0f040c3-c2dd-4450-b3c9-934374ebaf3e" containerName="nova-scheduler-scheduler" containerID="cri-o://10b48c5486e07bdd2361e652cc81d7854e0a6c930b735c4748ef3039b454a796" gracePeriod=30 Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.856253 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.856527 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-log" containerID="cri-o://288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8" gracePeriod=30 Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.857039 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-api" containerID="cri-o://ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1" gracePeriod=30 Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.880094 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.880529 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-log" containerID="cri-o://8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412" gracePeriod=30 Nov 26 17:08:30 crc kubenswrapper[5010]: I1126 17:08:30.880836 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-metadata" containerID="cri-o://82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c" gracePeriod=30 Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.486054 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.492826 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.534688 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gzkf2\" (UniqueName: \"kubernetes.io/projected/b2e68d75-b2e2-4e94-9d8d-630c64f144af-kube-api-access-gzkf2\") pod \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.534765 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2e68d75-b2e2-4e94-9d8d-630c64f144af-logs\") pod \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.534797 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-nova-metadata-tls-certs\") pod \"60e75d21-83b9-439d-b515-7d176bbb5a52\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.534825 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-combined-ca-bundle\") pod \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.535477 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2e68d75-b2e2-4e94-9d8d-630c64f144af-logs" (OuterVolumeSpecName: "logs") pod "b2e68d75-b2e2-4e94-9d8d-630c64f144af" (UID: "b2e68d75-b2e2-4e94-9d8d-630c64f144af"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.545542 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2e68d75-b2e2-4e94-9d8d-630c64f144af-kube-api-access-gzkf2" (OuterVolumeSpecName: "kube-api-access-gzkf2") pod "b2e68d75-b2e2-4e94-9d8d-630c64f144af" (UID: "b2e68d75-b2e2-4e94-9d8d-630c64f144af"). InnerVolumeSpecName "kube-api-access-gzkf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.569293 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b2e68d75-b2e2-4e94-9d8d-630c64f144af" (UID: "b2e68d75-b2e2-4e94-9d8d-630c64f144af"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.608949 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "60e75d21-83b9-439d-b515-7d176bbb5a52" (UID: "60e75d21-83b9-439d-b515-7d176bbb5a52"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.636820 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lcds7\" (UniqueName: \"kubernetes.io/projected/60e75d21-83b9-439d-b515-7d176bbb5a52-kube-api-access-lcds7\") pod \"60e75d21-83b9-439d-b515-7d176bbb5a52\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.636943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-config-data\") pod \"60e75d21-83b9-439d-b515-7d176bbb5a52\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.636970 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-combined-ca-bundle\") pod \"60e75d21-83b9-439d-b515-7d176bbb5a52\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637011 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60e75d21-83b9-439d-b515-7d176bbb5a52-logs\") pod \"60e75d21-83b9-439d-b515-7d176bbb5a52\" (UID: \"60e75d21-83b9-439d-b515-7d176bbb5a52\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637175 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-config-data\") pod \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\" (UID: \"b2e68d75-b2e2-4e94-9d8d-630c64f144af\") " Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637403 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60e75d21-83b9-439d-b515-7d176bbb5a52-logs" (OuterVolumeSpecName: "logs") pod "60e75d21-83b9-439d-b515-7d176bbb5a52" (UID: "60e75d21-83b9-439d-b515-7d176bbb5a52"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637934 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/60e75d21-83b9-439d-b515-7d176bbb5a52-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637957 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gzkf2\" (UniqueName: \"kubernetes.io/projected/b2e68d75-b2e2-4e94-9d8d-630c64f144af-kube-api-access-gzkf2\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637969 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b2e68d75-b2e2-4e94-9d8d-630c64f144af-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637979 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.637990 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.657345 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60e75d21-83b9-439d-b515-7d176bbb5a52-kube-api-access-lcds7" (OuterVolumeSpecName: "kube-api-access-lcds7") pod "60e75d21-83b9-439d-b515-7d176bbb5a52" (UID: "60e75d21-83b9-439d-b515-7d176bbb5a52"). InnerVolumeSpecName "kube-api-access-lcds7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.687116 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-config-data" (OuterVolumeSpecName: "config-data") pod "60e75d21-83b9-439d-b515-7d176bbb5a52" (UID: "60e75d21-83b9-439d-b515-7d176bbb5a52"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.706413 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-config-data" (OuterVolumeSpecName: "config-data") pod "b2e68d75-b2e2-4e94-9d8d-630c64f144af" (UID: "b2e68d75-b2e2-4e94-9d8d-630c64f144af"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.711732 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "60e75d21-83b9-439d-b515-7d176bbb5a52" (UID: "60e75d21-83b9-439d-b515-7d176bbb5a52"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.727929 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"bf0e2af8-bfad-41a1-af7e-5df8046f4c51","Type":"ContainerStarted","Data":"33524c69d30acdf21bdae789527d997f331d4e645bc459959ee72f3858610521"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.728811 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735451 5010 generic.go:334] "Generic (PLEG): container finished" podID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerID="ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1" exitCode=0 Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735484 5010 generic.go:334] "Generic (PLEG): container finished" podID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerID="288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8" exitCode=143 Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735529 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b2e68d75-b2e2-4e94-9d8d-630c64f144af","Type":"ContainerDied","Data":"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735555 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b2e68d75-b2e2-4e94-9d8d-630c64f144af","Type":"ContainerDied","Data":"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735566 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b2e68d75-b2e2-4e94-9d8d-630c64f144af","Type":"ContainerDied","Data":"6e3ccd2183b9a66bded65d735d7ef55869c9c74d3a504cda0fb347dda77c574b"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735583 5010 scope.go:117] "RemoveContainer" containerID="ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.735734 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.738667 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.738701 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/60e75d21-83b9-439d-b515-7d176bbb5a52-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.738727 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e68d75-b2e2-4e94-9d8d-630c64f144af-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.738737 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lcds7\" (UniqueName: \"kubernetes.io/projected/60e75d21-83b9-439d-b515-7d176bbb5a52-kube-api-access-lcds7\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.760552 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.760536236 podStartE2EDuration="2.760536236s" podCreationTimestamp="2025-11-26 17:08:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:31.755985763 +0000 UTC m=+6132.546702911" watchObservedRunningTime="2025-11-26 17:08:31.760536236 +0000 UTC m=+6132.551253384" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.762413 5010 generic.go:334] "Generic (PLEG): container finished" podID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerID="82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c" exitCode=0 Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.762441 5010 generic.go:334] "Generic (PLEG): container finished" podID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerID="8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412" exitCode=143 Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.762465 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.762465 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"60e75d21-83b9-439d-b515-7d176bbb5a52","Type":"ContainerDied","Data":"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.763367 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"60e75d21-83b9-439d-b515-7d176bbb5a52","Type":"ContainerDied","Data":"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.763381 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"60e75d21-83b9-439d-b515-7d176bbb5a52","Type":"ContainerDied","Data":"d5122e0d5336792f808db958a40e972eef1d6aae47db96050006a9d01cb21b7a"} Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.793405 5010 scope.go:117] "RemoveContainer" containerID="288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.799597 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.817603 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.825464 5010 scope.go:117] "RemoveContainer" containerID="ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.826870 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1\": container with ID starting with ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1 not found: ID does not exist" containerID="ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.826907 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1"} err="failed to get container status \"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1\": rpc error: code = NotFound desc = could not find container \"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1\": container with ID starting with ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1 not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.826936 5010 scope.go:117] "RemoveContainer" containerID="288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.828287 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8\": container with ID starting with 288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8 not found: ID does not exist" containerID="288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.828313 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8"} err="failed to 
get container status \"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8\": rpc error: code = NotFound desc = could not find container \"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8\": container with ID starting with 288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8 not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.828328 5010 scope.go:117] "RemoveContainer" containerID="ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.828590 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1"} err="failed to get container status \"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1\": rpc error: code = NotFound desc = could not find container \"ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1\": container with ID starting with ca375b2de9a2c82f0532fac892febe19567602c673edb1a937b39e3ad3ae07e1 not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.828608 5010 scope.go:117] "RemoveContainer" containerID="288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.828961 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8"} err="failed to get container status \"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8\": rpc error: code = NotFound desc = could not find container \"288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8\": container with ID starting with 288f0538623be0954e9b25955df404279c39092bb872c940c769d7af5a1d19c8 not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.828980 5010 scope.go:117] "RemoveContainer" containerID="82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.834601 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.835163 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-log" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835186 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-log" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.835227 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8775a1b7-7530-4163-9d34-b435a78fe316" containerName="nova-manage" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835237 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8775a1b7-7530-4163-9d34-b435a78fe316" containerName="nova-manage" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.835255 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-api" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835263 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-api" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.835274 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-metadata" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835281 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-metadata" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.835317 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-log" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835325 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-log" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835542 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-log" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835575 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8775a1b7-7530-4163-9d34-b435a78fe316" containerName="nova-manage" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835585 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" containerName="nova-api-api" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835605 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-log" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.835625 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" containerName="nova-metadata-metadata" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.836975 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.843882 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.846190 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.856098 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.884717 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.909599 5010 scope.go:117] "RemoveContainer" containerID="8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.915798 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60e75d21-83b9-439d-b515-7d176bbb5a52" path="/var/lib/kubelet/pods/60e75d21-83b9-439d-b515-7d176bbb5a52/volumes" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.916720 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2e68d75-b2e2-4e94-9d8d-630c64f144af" path="/var/lib/kubelet/pods/b2e68d75-b2e2-4e94-9d8d-630c64f144af/volumes" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.917356 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.918867 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.924101 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.924352 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.942319 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-logs\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.942591 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.942748 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-config-data\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.942988 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f7rkc\" (UniqueName: \"kubernetes.io/projected/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-kube-api-access-f7rkc\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.967352 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.973797 5010 scope.go:117] "RemoveContainer" containerID="82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.980218 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c\": container with ID starting with 82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c not found: ID does not exist" containerID="82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.980272 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c"} err="failed to get container status \"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c\": rpc error: code = NotFound desc = could not find container \"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c\": container with ID starting with 82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.980307 5010 scope.go:117] "RemoveContainer" containerID="8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412" Nov 26 17:08:31 crc kubenswrapper[5010]: E1126 17:08:31.981780 5010 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412\": container with ID starting with 8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412 not found: ID does not exist" containerID="8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.981851 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412"} err="failed to get container status \"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412\": rpc error: code = NotFound desc = could not find container \"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412\": container with ID starting with 8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412 not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.981885 5010 scope.go:117] "RemoveContainer" containerID="82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.982217 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c"} err="failed to get container status \"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c\": rpc error: code = NotFound desc = could not find container \"82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c\": container with ID starting with 82e474de608c3b6fa290ee88620e1a9d735f8562dafc8b9084842634f721132c not found: ID does not exist" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.982234 5010 scope.go:117] "RemoveContainer" containerID="8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412" Nov 26 17:08:31 crc kubenswrapper[5010]: I1126 17:08:31.982485 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412"} err="failed to get container status \"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412\": rpc error: code = NotFound desc = could not find container \"8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412\": container with ID starting with 8e6e8238921099f85fbc33d27ae95d29242cb1b2b0fc9471aa467d5812a23412 not found: ID does not exist" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.044754 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045102 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-config-data\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045201 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f7rkc\" (UniqueName: \"kubernetes.io/projected/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-kube-api-access-f7rkc\") pod 
\"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045225 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9llh\" (UniqueName: \"kubernetes.io/projected/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-kube-api-access-g9llh\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045251 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-logs\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045282 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045302 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-logs\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045376 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.045400 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-config-data\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.046653 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-logs\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.049090 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-config-data\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.051235 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.060686 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f7rkc\" (UniqueName: \"kubernetes.io/projected/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-kube-api-access-f7rkc\") pod 
\"nova-api-0\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.091856 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.147483 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.147536 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-config-data\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.147612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9llh\" (UniqueName: \"kubernetes.io/projected/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-kube-api-access-g9llh\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.147640 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-logs\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.147673 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.151926 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-logs\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.155674 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.158021 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866b588b8c-lgtkl"] Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.158395 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerName="dnsmasq-dns" containerID="cri-o://9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632" gracePeriod=10 Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.159617 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-config-data\") pod 
\"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.161001 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.174862 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.206472 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9llh\" (UniqueName: \"kubernetes.io/projected/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-kube-api-access-g9llh\") pod \"nova-metadata-0\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.266303 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.496013 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.609550 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.764389 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-nb\") pod \"58bd9152-e037-4d43-abf2-513e32b0eb0a\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.764456 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tn8g\" (UniqueName: \"kubernetes.io/projected/58bd9152-e037-4d43-abf2-513e32b0eb0a-kube-api-access-5tn8g\") pod \"58bd9152-e037-4d43-abf2-513e32b0eb0a\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.764537 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-dns-svc\") pod \"58bd9152-e037-4d43-abf2-513e32b0eb0a\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.764652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-config\") pod \"58bd9152-e037-4d43-abf2-513e32b0eb0a\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.772034 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58bd9152-e037-4d43-abf2-513e32b0eb0a-kube-api-access-5tn8g" (OuterVolumeSpecName: "kube-api-access-5tn8g") pod "58bd9152-e037-4d43-abf2-513e32b0eb0a" (UID: "58bd9152-e037-4d43-abf2-513e32b0eb0a"). InnerVolumeSpecName "kube-api-access-5tn8g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.773812 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-sb\") pod \"58bd9152-e037-4d43-abf2-513e32b0eb0a\" (UID: \"58bd9152-e037-4d43-abf2-513e32b0eb0a\") " Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.774820 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tn8g\" (UniqueName: \"kubernetes.io/projected/58bd9152-e037-4d43-abf2-513e32b0eb0a-kube-api-access-5tn8g\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.782958 5010 generic.go:334] "Generic (PLEG): container finished" podID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerID="9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632" exitCode=0 Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.783102 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.783586 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" event={"ID":"58bd9152-e037-4d43-abf2-513e32b0eb0a","Type":"ContainerDied","Data":"9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632"} Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.783620 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-866b588b8c-lgtkl" event={"ID":"58bd9152-e037-4d43-abf2-513e32b0eb0a","Type":"ContainerDied","Data":"4bc4947b8234690adf33a38e5260edb6197aa29237e3e320046a644660a60e70"} Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.783640 5010 scope.go:117] "RemoveContainer" containerID="9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.795618 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.841102 5010 scope.go:117] "RemoveContainer" containerID="70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.849176 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "58bd9152-e037-4d43-abf2-513e32b0eb0a" (UID: "58bd9152-e037-4d43-abf2-513e32b0eb0a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.852802 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-config" (OuterVolumeSpecName: "config") pod "58bd9152-e037-4d43-abf2-513e32b0eb0a" (UID: "58bd9152-e037-4d43-abf2-513e32b0eb0a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.856860 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "58bd9152-e037-4d43-abf2-513e32b0eb0a" (UID: "58bd9152-e037-4d43-abf2-513e32b0eb0a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.859160 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "58bd9152-e037-4d43-abf2-513e32b0eb0a" (UID: "58bd9152-e037-4d43-abf2-513e32b0eb0a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.863451 5010 scope.go:117] "RemoveContainer" containerID="9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632" Nov 26 17:08:32 crc kubenswrapper[5010]: E1126 17:08:32.863911 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632\": container with ID starting with 9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632 not found: ID does not exist" containerID="9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.863950 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632"} err="failed to get container status \"9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632\": rpc error: code = NotFound desc = could not find container \"9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632\": container with ID starting with 9784e2ae8432bd93b623beead93e484795db5c9e8d2b8b6dce612493427bb632 not found: ID does not exist" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.863986 5010 scope.go:117] "RemoveContainer" containerID="70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8" Nov 26 17:08:32 crc kubenswrapper[5010]: E1126 17:08:32.864296 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8\": container with ID starting with 70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8 not found: ID does not exist" containerID="70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.864321 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8"} err="failed to get container status \"70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8\": rpc error: code = NotFound desc = could not find container \"70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8\": container with ID starting with 70c3ce83195d894d5b0de1363d8f754319c04cf920d02b2229e29398fec5c9a8 not found: ID does not exist" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.876318 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.876354 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 
17:08:32.876378 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.876391 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58bd9152-e037-4d43-abf2-513e32b0eb0a-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:32 crc kubenswrapper[5010]: W1126 17:08:32.916190 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod975fbb21_9a63_43b8_b5b8_e9141ed6a16d.slice/crio-b94239380a4dda611c94e5d9345d9435a8b983e66ebd76fbf904d7b9108286d9 WatchSource:0}: Error finding container b94239380a4dda611c94e5d9345d9435a8b983e66ebd76fbf904d7b9108286d9: Status 404 returned error can't find the container with id b94239380a4dda611c94e5d9345d9435a8b983e66ebd76fbf904d7b9108286d9 Nov 26 17:08:32 crc kubenswrapper[5010]: I1126 17:08:32.918119 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.164730 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-866b588b8c-lgtkl"] Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.172909 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-866b588b8c-lgtkl"] Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.807382 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"975fbb21-9a63-43b8-b5b8-e9141ed6a16d","Type":"ContainerStarted","Data":"f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223"} Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.807435 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"975fbb21-9a63-43b8-b5b8-e9141ed6a16d","Type":"ContainerStarted","Data":"0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63"} Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.807450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"975fbb21-9a63-43b8-b5b8-e9141ed6a16d","Type":"ContainerStarted","Data":"b94239380a4dda611c94e5d9345d9435a8b983e66ebd76fbf904d7b9108286d9"} Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.811554 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8fa78714-b16f-46a5-8d5b-7657d0f89b5b","Type":"ContainerStarted","Data":"994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc"} Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.811588 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8fa78714-b16f-46a5-8d5b-7657d0f89b5b","Type":"ContainerStarted","Data":"db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552"} Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.811600 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8fa78714-b16f-46a5-8d5b-7657d0f89b5b","Type":"ContainerStarted","Data":"8b9a7363fc22e3b8e069891eb98d6a8cd1f9a43515a77d6ee829e5a796947e25"} Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.836562 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.836535674 podStartE2EDuration="2.836535674s" podCreationTimestamp="2025-11-26 17:08:31 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:33.829225652 +0000 UTC m=+6134.619942840" watchObservedRunningTime="2025-11-26 17:08:33.836535674 +0000 UTC m=+6134.627252822" Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.854737 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.854691426 podStartE2EDuration="2.854691426s" podCreationTimestamp="2025-11-26 17:08:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:33.848397889 +0000 UTC m=+6134.639115047" watchObservedRunningTime="2025-11-26 17:08:33.854691426 +0000 UTC m=+6134.645408584" Nov 26 17:08:33 crc kubenswrapper[5010]: I1126 17:08:33.918035 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" path="/var/lib/kubelet/pods/58bd9152-e037-4d43-abf2-513e32b0eb0a/volumes" Nov 26 17:08:35 crc kubenswrapper[5010]: I1126 17:08:35.128992 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 17:08:37 crc kubenswrapper[5010]: I1126 17:08:37.267126 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 17:08:37 crc kubenswrapper[5010]: I1126 17:08:37.267431 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 17:08:37 crc kubenswrapper[5010]: I1126 17:08:37.495620 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:37 crc kubenswrapper[5010]: I1126 17:08:37.514221 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:37 crc kubenswrapper[5010]: I1126 17:08:37.879145 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.072380 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-8gjck"] Nov 26 17:08:38 crc kubenswrapper[5010]: E1126 17:08:38.072793 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerName="dnsmasq-dns" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.072805 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerName="dnsmasq-dns" Nov 26 17:08:38 crc kubenswrapper[5010]: E1126 17:08:38.072854 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerName="init" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.072860 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerName="init" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.073019 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="58bd9152-e037-4d43-abf2-513e32b0eb0a" containerName="dnsmasq-dns" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.073684 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.076695 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.079103 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.087430 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svl6c\" (UniqueName: \"kubernetes.io/projected/7ccb3027-4d8f-452f-b96d-76a970475d7a-kube-api-access-svl6c\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.087514 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-config-data\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.087549 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.087567 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-scripts\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.088491 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-8gjck"] Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.190008 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svl6c\" (UniqueName: \"kubernetes.io/projected/7ccb3027-4d8f-452f-b96d-76a970475d7a-kube-api-access-svl6c\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.190107 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-config-data\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.190150 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.190172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-scripts\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.196518 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-config-data\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.197284 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-scripts\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.200324 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.208220 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svl6c\" (UniqueName: \"kubernetes.io/projected/7ccb3027-4d8f-452f-b96d-76a970475d7a-kube-api-access-svl6c\") pod \"nova-cell1-cell-mapping-8gjck\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.399658 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:38 crc kubenswrapper[5010]: W1126 17:08:38.955542 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ccb3027_4d8f_452f_b96d_76a970475d7a.slice/crio-6df5debc13df152fee4db4dd7578df1e8c5b7ca315a5e1c18249040ba428c0c6 WatchSource:0}: Error finding container 6df5debc13df152fee4db4dd7578df1e8c5b7ca315a5e1c18249040ba428c0c6: Status 404 returned error can't find the container with id 6df5debc13df152fee4db4dd7578df1e8c5b7ca315a5e1c18249040ba428c0c6 Nov 26 17:08:38 crc kubenswrapper[5010]: I1126 17:08:38.958489 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-8gjck"] Nov 26 17:08:39 crc kubenswrapper[5010]: I1126 17:08:39.932557 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8gjck" event={"ID":"7ccb3027-4d8f-452f-b96d-76a970475d7a","Type":"ContainerStarted","Data":"f7512cc25f723adf1798cb87edc561375efbf7cd2e745a9d86031836d4706208"} Nov 26 17:08:39 crc kubenswrapper[5010]: I1126 17:08:39.933030 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8gjck" event={"ID":"7ccb3027-4d8f-452f-b96d-76a970475d7a","Type":"ContainerStarted","Data":"6df5debc13df152fee4db4dd7578df1e8c5b7ca315a5e1c18249040ba428c0c6"} Nov 26 17:08:39 crc kubenswrapper[5010]: I1126 17:08:39.952018 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-8gjck" podStartSLOduration=1.9519953719999998 podStartE2EDuration="1.951995372s" podCreationTimestamp="2025-11-26 17:08:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:39.947217303 +0000 UTC m=+6140.737934451" watchObservedRunningTime="2025-11-26 17:08:39.951995372 +0000 UTC m=+6140.742712520" Nov 26 17:08:41 crc kubenswrapper[5010]: I1126 17:08:41.423156 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:08:41 crc kubenswrapper[5010]: I1126 17:08:41.423697 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:08:42 crc kubenswrapper[5010]: I1126 17:08:42.176952 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 17:08:42 crc kubenswrapper[5010]: I1126 17:08:42.177031 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 17:08:42 crc kubenswrapper[5010]: I1126 17:08:42.267884 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 17:08:42 crc kubenswrapper[5010]: I1126 17:08:42.267923 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 17:08:43 crc kubenswrapper[5010]: I1126 17:08:43.259962 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" 
podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.105:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 17:08:43 crc kubenswrapper[5010]: I1126 17:08:43.260384 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.105:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 17:08:43 crc kubenswrapper[5010]: I1126 17:08:43.288965 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.106:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 17:08:43 crc kubenswrapper[5010]: I1126 17:08:43.289203 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.106:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 17:08:43 crc kubenswrapper[5010]: I1126 17:08:43.965879 5010 generic.go:334] "Generic (PLEG): container finished" podID="7ccb3027-4d8f-452f-b96d-76a970475d7a" containerID="f7512cc25f723adf1798cb87edc561375efbf7cd2e745a9d86031836d4706208" exitCode=0 Nov 26 17:08:43 crc kubenswrapper[5010]: I1126 17:08:43.965918 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8gjck" event={"ID":"7ccb3027-4d8f-452f-b96d-76a970475d7a","Type":"ContainerDied","Data":"f7512cc25f723adf1798cb87edc561375efbf7cd2e745a9d86031836d4706208"} Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.384234 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.479618 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-config-data\") pod \"7ccb3027-4d8f-452f-b96d-76a970475d7a\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.479806 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-combined-ca-bundle\") pod \"7ccb3027-4d8f-452f-b96d-76a970475d7a\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.479989 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svl6c\" (UniqueName: \"kubernetes.io/projected/7ccb3027-4d8f-452f-b96d-76a970475d7a-kube-api-access-svl6c\") pod \"7ccb3027-4d8f-452f-b96d-76a970475d7a\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.480238 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-scripts\") pod \"7ccb3027-4d8f-452f-b96d-76a970475d7a\" (UID: \"7ccb3027-4d8f-452f-b96d-76a970475d7a\") " Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.485792 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ccb3027-4d8f-452f-b96d-76a970475d7a-kube-api-access-svl6c" (OuterVolumeSpecName: "kube-api-access-svl6c") pod "7ccb3027-4d8f-452f-b96d-76a970475d7a" (UID: "7ccb3027-4d8f-452f-b96d-76a970475d7a"). InnerVolumeSpecName "kube-api-access-svl6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.487630 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-scripts" (OuterVolumeSpecName: "scripts") pod "7ccb3027-4d8f-452f-b96d-76a970475d7a" (UID: "7ccb3027-4d8f-452f-b96d-76a970475d7a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.514460 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-config-data" (OuterVolumeSpecName: "config-data") pod "7ccb3027-4d8f-452f-b96d-76a970475d7a" (UID: "7ccb3027-4d8f-452f-b96d-76a970475d7a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.540513 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7ccb3027-4d8f-452f-b96d-76a970475d7a" (UID: "7ccb3027-4d8f-452f-b96d-76a970475d7a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.584449 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.584504 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.584522 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ccb3027-4d8f-452f-b96d-76a970475d7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.584537 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svl6c\" (UniqueName: \"kubernetes.io/projected/7ccb3027-4d8f-452f-b96d-76a970475d7a-kube-api-access-svl6c\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.986040 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-8gjck" event={"ID":"7ccb3027-4d8f-452f-b96d-76a970475d7a","Type":"ContainerDied","Data":"6df5debc13df152fee4db4dd7578df1e8c5b7ca315a5e1c18249040ba428c0c6"} Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.986091 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6df5debc13df152fee4db4dd7578df1e8c5b7ca315a5e1c18249040ba428c0c6" Nov 26 17:08:45 crc kubenswrapper[5010]: I1126 17:08:45.986091 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-8gjck" Nov 26 17:08:46 crc kubenswrapper[5010]: I1126 17:08:46.182818 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:46 crc kubenswrapper[5010]: I1126 17:08:46.183369 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-log" containerID="cri-o://db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552" gracePeriod=30 Nov 26 17:08:46 crc kubenswrapper[5010]: I1126 17:08:46.183450 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-api" containerID="cri-o://994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc" gracePeriod=30 Nov 26 17:08:46 crc kubenswrapper[5010]: I1126 17:08:46.245338 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:46 crc kubenswrapper[5010]: I1126 17:08:46.245615 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-log" containerID="cri-o://0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63" gracePeriod=30 Nov 26 17:08:46 crc kubenswrapper[5010]: I1126 17:08:46.245849 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-metadata" containerID="cri-o://f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223" gracePeriod=30 Nov 26 17:08:46 crc 
kubenswrapper[5010]: I1126 17:08:46.998923 5010 generic.go:334] "Generic (PLEG): container finished" podID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerID="0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63" exitCode=143 Nov 26 17:08:47 crc kubenswrapper[5010]: I1126 17:08:46.999013 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"975fbb21-9a63-43b8-b5b8-e9141ed6a16d","Type":"ContainerDied","Data":"0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63"} Nov 26 17:08:47 crc kubenswrapper[5010]: I1126 17:08:47.001443 5010 generic.go:334] "Generic (PLEG): container finished" podID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerID="db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552" exitCode=143 Nov 26 17:08:47 crc kubenswrapper[5010]: I1126 17:08:47.001480 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8fa78714-b16f-46a5-8d5b-7657d0f89b5b","Type":"ContainerDied","Data":"db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552"} Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.808189 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.813408 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.864974 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-logs\") pod \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865062 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-config-data\") pod \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865131 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-combined-ca-bundle\") pod \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865249 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-logs\") pod \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865274 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-config-data\") pod \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865300 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-nova-metadata-tls-certs\") pod \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " Nov 26 17:08:49 crc 
kubenswrapper[5010]: I1126 17:08:49.865365 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f7rkc\" (UniqueName: \"kubernetes.io/projected/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-kube-api-access-f7rkc\") pod \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\" (UID: \"8fa78714-b16f-46a5-8d5b-7657d0f89b5b\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865390 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9llh\" (UniqueName: \"kubernetes.io/projected/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-kube-api-access-g9llh\") pod \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.865473 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-combined-ca-bundle\") pod \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\" (UID: \"975fbb21-9a63-43b8-b5b8-e9141ed6a16d\") " Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.868406 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-logs" (OuterVolumeSpecName: "logs") pod "975fbb21-9a63-43b8-b5b8-e9141ed6a16d" (UID: "975fbb21-9a63-43b8-b5b8-e9141ed6a16d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.868587 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-logs" (OuterVolumeSpecName: "logs") pod "8fa78714-b16f-46a5-8d5b-7657d0f89b5b" (UID: "8fa78714-b16f-46a5-8d5b-7657d0f89b5b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.872188 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-kube-api-access-g9llh" (OuterVolumeSpecName: "kube-api-access-g9llh") pod "975fbb21-9a63-43b8-b5b8-e9141ed6a16d" (UID: "975fbb21-9a63-43b8-b5b8-e9141ed6a16d"). InnerVolumeSpecName "kube-api-access-g9llh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.873878 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-kube-api-access-f7rkc" (OuterVolumeSpecName: "kube-api-access-f7rkc") pod "8fa78714-b16f-46a5-8d5b-7657d0f89b5b" (UID: "8fa78714-b16f-46a5-8d5b-7657d0f89b5b"). InnerVolumeSpecName "kube-api-access-f7rkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.902992 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-config-data" (OuterVolumeSpecName: "config-data") pod "975fbb21-9a63-43b8-b5b8-e9141ed6a16d" (UID: "975fbb21-9a63-43b8-b5b8-e9141ed6a16d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.909793 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-config-data" (OuterVolumeSpecName: "config-data") pod "8fa78714-b16f-46a5-8d5b-7657d0f89b5b" (UID: "8fa78714-b16f-46a5-8d5b-7657d0f89b5b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.913702 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "975fbb21-9a63-43b8-b5b8-e9141ed6a16d" (UID: "975fbb21-9a63-43b8-b5b8-e9141ed6a16d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.922743 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8fa78714-b16f-46a5-8d5b-7657d0f89b5b" (UID: "8fa78714-b16f-46a5-8d5b-7657d0f89b5b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.939768 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "975fbb21-9a63-43b8-b5b8-e9141ed6a16d" (UID: "975fbb21-9a63-43b8-b5b8-e9141ed6a16d"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981048 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981087 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981100 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981113 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f7rkc\" (UniqueName: \"kubernetes.io/projected/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-kube-api-access-f7rkc\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981124 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9llh\" (UniqueName: \"kubernetes.io/projected/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-kube-api-access-g9llh\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981140 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981153 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981164 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/975fbb21-9a63-43b8-b5b8-e9141ed6a16d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:49 crc kubenswrapper[5010]: I1126 17:08:49.981175 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fa78714-b16f-46a5-8d5b-7657d0f89b5b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.033695 5010 generic.go:334] "Generic (PLEG): container finished" podID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerID="994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc" exitCode=0 Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.033789 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.033778 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8fa78714-b16f-46a5-8d5b-7657d0f89b5b","Type":"ContainerDied","Data":"994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc"} Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.033873 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"8fa78714-b16f-46a5-8d5b-7657d0f89b5b","Type":"ContainerDied","Data":"8b9a7363fc22e3b8e069891eb98d6a8cd1f9a43515a77d6ee829e5a796947e25"} Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.033898 5010 scope.go:117] "RemoveContainer" containerID="994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.036232 5010 generic.go:334] "Generic (PLEG): container finished" podID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerID="f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223" exitCode=0 Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.036272 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"975fbb21-9a63-43b8-b5b8-e9141ed6a16d","Type":"ContainerDied","Data":"f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223"} Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.036342 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"975fbb21-9a63-43b8-b5b8-e9141ed6a16d","Type":"ContainerDied","Data":"b94239380a4dda611c94e5d9345d9435a8b983e66ebd76fbf904d7b9108286d9"} Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.036343 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.094112 5010 scope.go:117] "RemoveContainer" containerID="db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.101155 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.138290 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.145598 5010 scope.go:117] "RemoveContainer" containerID="994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.146027 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc\": container with ID starting with 994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc not found: ID does not exist" containerID="994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.146187 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc"} err="failed to get container status \"994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc\": rpc error: code = NotFound desc = could not find container \"994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc\": container with ID starting with 994337e9a5f7dfc7b9b2a6c83e110eea720a91998b8118c8ab10d2068e00acfc not found: ID does not exist" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.146296 5010 scope.go:117] "RemoveContainer" containerID="db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.146778 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552\": container with ID starting with db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552 not found: ID does not exist" containerID="db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.146802 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552"} err="failed to get container status \"db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552\": rpc error: code = NotFound desc = could not find container \"db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552\": container with ID starting with db9f7b856fa02234f671f12057f3b6ed9bc5eddac6af19d2bcd67b4106cd0552 not found: ID does not exist" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.146816 5010 scope.go:117] "RemoveContainer" containerID="f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.161348 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.169285 5010 scope.go:117] "RemoveContainer" containerID="0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63" Nov 26 17:08:50 crc kubenswrapper[5010]: 
I1126 17:08:50.174999 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.184855 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.185500 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ccb3027-4d8f-452f-b96d-76a970475d7a" containerName="nova-manage" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185530 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ccb3027-4d8f-452f-b96d-76a970475d7a" containerName="nova-manage" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.185554 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-log" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185564 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-log" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.185598 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-metadata" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185609 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-metadata" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.185636 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-api" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185644 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-api" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.185668 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-log" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185677 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-log" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185925 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ccb3027-4d8f-452f-b96d-76a970475d7a" containerName="nova-manage" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185945 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-metadata" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185956 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-api" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185977 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" containerName="nova-metadata-log" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.185990 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" containerName="nova-api-log" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.188456 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.191511 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.193098 5010 scope.go:117] "RemoveContainer" containerID="f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.193814 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223\": container with ID starting with f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223 not found: ID does not exist" containerID="f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.193869 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223"} err="failed to get container status \"f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223\": rpc error: code = NotFound desc = could not find container \"f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223\": container with ID starting with f6d5b46752727a6d23342aa7116bb20c31da00e444a714ffead3589e6db9e223 not found: ID does not exist" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.193904 5010 scope.go:117] "RemoveContainer" containerID="0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63" Nov 26 17:08:50 crc kubenswrapper[5010]: E1126 17:08:50.194237 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63\": container with ID starting with 0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63 not found: ID does not exist" containerID="0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.194338 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63"} err="failed to get container status \"0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63\": rpc error: code = NotFound desc = could not find container \"0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63\": container with ID starting with 0fe30e1c4b20d10e4a92ef90a63be399040c8588f6597c0caf02ff65d9262c63 not found: ID does not exist" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.197097 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.224469 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.224616 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.227343 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.227549 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.235274 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.287895 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.287955 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27b7d05d-503e-4417-abc0-4c9b58aec030-logs\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.287988 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvb8q\" (UniqueName: \"kubernetes.io/projected/27b7d05d-503e-4417-abc0-4c9b58aec030-kube-api-access-zvb8q\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.288167 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-config-data\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.288322 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.288549 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43cbb13b-0c00-499f-96ee-6e815ce62895-logs\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.288591 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ctxt\" (UniqueName: \"kubernetes.io/projected/43cbb13b-0c00-499f-96ee-6e815ce62895-kube-api-access-5ctxt\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.288681 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-config-data\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " 
pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.288828 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390416 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27b7d05d-503e-4417-abc0-4c9b58aec030-logs\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390485 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvb8q\" (UniqueName: \"kubernetes.io/projected/27b7d05d-503e-4417-abc0-4c9b58aec030-kube-api-access-zvb8q\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390520 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-config-data\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390555 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390615 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43cbb13b-0c00-499f-96ee-6e815ce62895-logs\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390636 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ctxt\" (UniqueName: \"kubernetes.io/projected/43cbb13b-0c00-499f-96ee-6e815ce62895-kube-api-access-5ctxt\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390660 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-config-data\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390697 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.390764 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-nova-metadata-tls-certs\") pod \"nova-metadata-0\" 
(UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.391034 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27b7d05d-503e-4417-abc0-4c9b58aec030-logs\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.391367 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43cbb13b-0c00-499f-96ee-6e815ce62895-logs\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.394386 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.395137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-config-data\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.395400 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-config-data\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.396123 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.399163 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.408846 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvb8q\" (UniqueName: \"kubernetes.io/projected/27b7d05d-503e-4417-abc0-4c9b58aec030-kube-api-access-zvb8q\") pod \"nova-metadata-0\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.413406 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ctxt\" (UniqueName: \"kubernetes.io/projected/43cbb13b-0c00-499f-96ee-6e815ce62895-kube-api-access-5ctxt\") pod \"nova-api-0\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.516957 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.546156 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 17:08:50 crc kubenswrapper[5010]: I1126 17:08:50.952631 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 17:08:51 crc kubenswrapper[5010]: I1126 17:08:51.053119 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"27b7d05d-503e-4417-abc0-4c9b58aec030","Type":"ContainerStarted","Data":"3107d9fc1f7afb1f7e7c57b3d21ad7a84968d16623b59dc1c8a99b589b230e27"} Nov 26 17:08:51 crc kubenswrapper[5010]: I1126 17:08:51.187591 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:08:51 crc kubenswrapper[5010]: W1126 17:08:51.195316 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod43cbb13b_0c00_499f_96ee_6e815ce62895.slice/crio-0b042cc709fb6a5fbdfa033fbd30b5875a445ae6ea54852e5f8483e97a92161b WatchSource:0}: Error finding container 0b042cc709fb6a5fbdfa033fbd30b5875a445ae6ea54852e5f8483e97a92161b: Status 404 returned error can't find the container with id 0b042cc709fb6a5fbdfa033fbd30b5875a445ae6ea54852e5f8483e97a92161b Nov 26 17:08:51 crc kubenswrapper[5010]: I1126 17:08:51.909626 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fa78714-b16f-46a5-8d5b-7657d0f89b5b" path="/var/lib/kubelet/pods/8fa78714-b16f-46a5-8d5b-7657d0f89b5b/volumes" Nov 26 17:08:51 crc kubenswrapper[5010]: I1126 17:08:51.911740 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="975fbb21-9a63-43b8-b5b8-e9141ed6a16d" path="/var/lib/kubelet/pods/975fbb21-9a63-43b8-b5b8-e9141ed6a16d/volumes" Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.067977 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"27b7d05d-503e-4417-abc0-4c9b58aec030","Type":"ContainerStarted","Data":"3479b37319a03cc7eb1fcfe947294c65c254f47c77a5bd2d2b976a8aeb25b40f"} Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.068035 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"27b7d05d-503e-4417-abc0-4c9b58aec030","Type":"ContainerStarted","Data":"0d5eb9baf703c29ae969ea38e5dab0f1636b77b82498c3e1917f7088fde488dd"} Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.070399 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"43cbb13b-0c00-499f-96ee-6e815ce62895","Type":"ContainerStarted","Data":"5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52"} Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.070451 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"43cbb13b-0c00-499f-96ee-6e815ce62895","Type":"ContainerStarted","Data":"800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05"} Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.070465 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"43cbb13b-0c00-499f-96ee-6e815ce62895","Type":"ContainerStarted","Data":"0b042cc709fb6a5fbdfa033fbd30b5875a445ae6ea54852e5f8483e97a92161b"} Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.092117 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.092085947 podStartE2EDuration="2.092085947s" podCreationTimestamp="2025-11-26 17:08:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:52.085727209 +0000 UTC m=+6152.876444407" watchObservedRunningTime="2025-11-26 17:08:52.092085947 +0000 UTC m=+6152.882803135" Nov 26 17:08:52 crc kubenswrapper[5010]: I1126 17:08:52.126115 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.126095283 podStartE2EDuration="2.126095283s" podCreationTimestamp="2025-11-26 17:08:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:08:52.103174923 +0000 UTC m=+6152.893892071" watchObservedRunningTime="2025-11-26 17:08:52.126095283 +0000 UTC m=+6152.916812431" Nov 26 17:08:55 crc kubenswrapper[5010]: I1126 17:08:55.547404 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 17:08:55 crc kubenswrapper[5010]: I1126 17:08:55.547912 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 17:09:00 crc kubenswrapper[5010]: I1126 17:09:00.517236 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 17:09:00 crc kubenswrapper[5010]: I1126 17:09:00.517886 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 17:09:00 crc kubenswrapper[5010]: I1126 17:09:00.547322 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 17:09:00 crc kubenswrapper[5010]: I1126 17:09:00.547405 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.205304 5010 generic.go:334] "Generic (PLEG): container finished" podID="d0f040c3-c2dd-4450-b3c9-934374ebaf3e" containerID="10b48c5486e07bdd2361e652cc81d7854e0a6c930b735c4748ef3039b454a796" exitCode=137 Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.205957 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d0f040c3-c2dd-4450-b3c9-934374ebaf3e","Type":"ContainerDied","Data":"10b48c5486e07bdd2361e652cc81d7854e0a6c930b735c4748ef3039b454a796"} Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.206019 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d0f040c3-c2dd-4450-b3c9-934374ebaf3e","Type":"ContainerDied","Data":"b206111797e7ae541b35b53306b9352c84f4d8b9649c353f8ef4592cdfd5a39e"} Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.206033 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b206111797e7ae541b35b53306b9352c84f4d8b9649c353f8ef4592cdfd5a39e" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.247063 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.356617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-config-data\") pod \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.357455 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhchj\" (UniqueName: \"kubernetes.io/projected/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-kube-api-access-nhchj\") pod \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.357555 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-combined-ca-bundle\") pod \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\" (UID: \"d0f040c3-c2dd-4450-b3c9-934374ebaf3e\") " Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.369249 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-kube-api-access-nhchj" (OuterVolumeSpecName: "kube-api-access-nhchj") pod "d0f040c3-c2dd-4450-b3c9-934374ebaf3e" (UID: "d0f040c3-c2dd-4450-b3c9-934374ebaf3e"). InnerVolumeSpecName "kube-api-access-nhchj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.388517 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0f040c3-c2dd-4450-b3c9-934374ebaf3e" (UID: "d0f040c3-c2dd-4450-b3c9-934374ebaf3e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.406442 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-config-data" (OuterVolumeSpecName: "config-data") pod "d0f040c3-c2dd-4450-b3c9-934374ebaf3e" (UID: "d0f040c3-c2dd-4450-b3c9-934374ebaf3e"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.460531 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.460569 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhchj\" (UniqueName: \"kubernetes.io/projected/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-kube-api-access-nhchj\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.460584 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0f040c3-c2dd-4450-b3c9-934374ebaf3e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.616898 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.109:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.617192 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.1.108:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.617227 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.109:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 17:09:01 crc kubenswrapper[5010]: I1126 17:09:01.617253 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.1.108:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.216955 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.259843 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.299741 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.316773 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:09:02 crc kubenswrapper[5010]: E1126 17:09:02.317641 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0f040c3-c2dd-4450-b3c9-934374ebaf3e" containerName="nova-scheduler-scheduler" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.317657 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0f040c3-c2dd-4450-b3c9-934374ebaf3e" containerName="nova-scheduler-scheduler" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.318077 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0f040c3-c2dd-4450-b3c9-934374ebaf3e" containerName="nova-scheduler-scheduler" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.318996 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.326032 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.332869 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.382767 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-config-data\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.382848 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.382997 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4bgn\" (UniqueName: \"kubernetes.io/projected/f7b955c7-f81c-41c1-aba7-75dac6c8281d-kube-api-access-d4bgn\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.484805 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-config-data\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.484869 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc 
kubenswrapper[5010]: I1126 17:09:02.484956 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4bgn\" (UniqueName: \"kubernetes.io/projected/f7b955c7-f81c-41c1-aba7-75dac6c8281d-kube-api-access-d4bgn\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.488853 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.494137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-config-data\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.506430 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4bgn\" (UniqueName: \"kubernetes.io/projected/f7b955c7-f81c-41c1-aba7-75dac6c8281d-kube-api-access-d4bgn\") pod \"nova-scheduler-0\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " pod="openstack/nova-scheduler-0" Nov 26 17:09:02 crc kubenswrapper[5010]: I1126 17:09:02.662773 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 17:09:03 crc kubenswrapper[5010]: I1126 17:09:03.133723 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 17:09:03 crc kubenswrapper[5010]: W1126 17:09:03.148029 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7b955c7_f81c_41c1_aba7_75dac6c8281d.slice/crio-a6db2911ebcf5a59aedeee43e823652448e72f9231624ffcfdf1c56a2bdbc7ea WatchSource:0}: Error finding container a6db2911ebcf5a59aedeee43e823652448e72f9231624ffcfdf1c56a2bdbc7ea: Status 404 returned error can't find the container with id a6db2911ebcf5a59aedeee43e823652448e72f9231624ffcfdf1c56a2bdbc7ea Nov 26 17:09:03 crc kubenswrapper[5010]: I1126 17:09:03.228502 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f7b955c7-f81c-41c1-aba7-75dac6c8281d","Type":"ContainerStarted","Data":"a6db2911ebcf5a59aedeee43e823652448e72f9231624ffcfdf1c56a2bdbc7ea"} Nov 26 17:09:03 crc kubenswrapper[5010]: I1126 17:09:03.914847 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0f040c3-c2dd-4450-b3c9-934374ebaf3e" path="/var/lib/kubelet/pods/d0f040c3-c2dd-4450-b3c9-934374ebaf3e/volumes" Nov 26 17:09:04 crc kubenswrapper[5010]: I1126 17:09:04.246294 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f7b955c7-f81c-41c1-aba7-75dac6c8281d","Type":"ContainerStarted","Data":"86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d"} Nov 26 17:09:06 crc kubenswrapper[5010]: I1126 17:09:06.475186 5010 scope.go:117] "RemoveContainer" containerID="4de2c5af96140abdae15985b70ab60fb8cc59491adbf8421572de0a86807fc50" Nov 26 17:09:06 crc kubenswrapper[5010]: I1126 17:09:06.505676 5010 scope.go:117] "RemoveContainer" containerID="c199513abd3b850396eeb0c05485b757b7f3232cc79b9e3e23076744c0285f9b" Nov 26 17:09:07 crc 
kubenswrapper[5010]: I1126 17:09:07.663965 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.522275 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.522684 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.523425 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.523494 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.555074 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.555139 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.560733 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.561089 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.562650 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.579072 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=8.579048952 podStartE2EDuration="8.579048952s" podCreationTimestamp="2025-11-26 17:09:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:09:04.268738221 +0000 UTC m=+6165.059455399" watchObservedRunningTime="2025-11-26 17:09:10.579048952 +0000 UTC m=+6171.369766130" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.808007 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-54d7c4984c-c8tbz"] Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.809702 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.853386 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-sb\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.853488 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-nb\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.853529 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-dns-svc\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.853562 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54d7c4984c-c8tbz"] Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.853612 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqw5x\" (UniqueName: \"kubernetes.io/projected/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-kube-api-access-fqw5x\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.853635 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-config\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.955900 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-nb\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.955987 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-dns-svc\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.956056 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqw5x\" (UniqueName: \"kubernetes.io/projected/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-kube-api-access-fqw5x\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.956082 5010 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-config\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.956129 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-sb\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.957356 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-sb\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.957608 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-dns-svc\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.957701 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-nb\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.958172 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-config\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:10 crc kubenswrapper[5010]: I1126 17:09:10.980367 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqw5x\" (UniqueName: \"kubernetes.io/projected/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-kube-api-access-fqw5x\") pod \"dnsmasq-dns-54d7c4984c-c8tbz\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.137210 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.336168 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.422707 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.422779 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.422826 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.423581 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4b598b46f2cf6c5daaf375b8d9dc8672aba51e2bbf338cbfbf04472a425972f5"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.423649 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://4b598b46f2cf6c5daaf375b8d9dc8672aba51e2bbf338cbfbf04472a425972f5" gracePeriod=600 Nov 26 17:09:11 crc kubenswrapper[5010]: I1126 17:09:11.612239 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-54d7c4984c-c8tbz"] Nov 26 17:09:11 crc kubenswrapper[5010]: W1126 17:09:11.619155 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode4a27ccd_c8c3_499a_8ce2_463dd5e33842.slice/crio-90d590bd744ab7abab7d47657e1e9cc959ed49f7392eadd38aa54cc84c74fe01 WatchSource:0}: Error finding container 90d590bd744ab7abab7d47657e1e9cc959ed49f7392eadd38aa54cc84c74fe01: Status 404 returned error can't find the container with id 90d590bd744ab7abab7d47657e1e9cc959ed49f7392eadd38aa54cc84c74fe01 Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.339432 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="4b598b46f2cf6c5daaf375b8d9dc8672aba51e2bbf338cbfbf04472a425972f5" exitCode=0 Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.339535 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"4b598b46f2cf6c5daaf375b8d9dc8672aba51e2bbf338cbfbf04472a425972f5"} Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.340244 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f"} Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.340331 5010 scope.go:117] "RemoveContainer" containerID="1f5bf8ed211c58db58b1a3b01ac0093e1b55183305b624d65ecd0184dd646902" Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.342313 5010 generic.go:334] "Generic (PLEG): container finished" podID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerID="2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861" exitCode=0 Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.342366 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" event={"ID":"e4a27ccd-c8c3-499a-8ce2-463dd5e33842","Type":"ContainerDied","Data":"2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861"} Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.342415 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" event={"ID":"e4a27ccd-c8c3-499a-8ce2-463dd5e33842","Type":"ContainerStarted","Data":"90d590bd744ab7abab7d47657e1e9cc959ed49f7392eadd38aa54cc84c74fe01"} Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.663775 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 17:09:12 crc kubenswrapper[5010]: I1126 17:09:12.693822 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.364374 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" event={"ID":"e4a27ccd-c8c3-499a-8ce2-463dd5e33842","Type":"ContainerStarted","Data":"deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6"} Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.364978 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.372482 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.372747 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-log" containerID="cri-o://800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05" gracePeriod=30 Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.372883 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-api" containerID="cri-o://5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52" gracePeriod=30 Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.394254 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" podStartSLOduration=3.39423409 podStartE2EDuration="3.39423409s" podCreationTimestamp="2025-11-26 17:09:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:09:13.384090797 +0000 UTC m=+6174.174807945" watchObservedRunningTime="2025-11-26 17:09:13.39423409 +0000 UTC m=+6174.184951258" Nov 26 17:09:13 crc kubenswrapper[5010]: I1126 17:09:13.426870 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 17:09:14 crc kubenswrapper[5010]: I1126 17:09:14.388247 5010 generic.go:334] "Generic (PLEG): container finished" podID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerID="800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05" exitCode=143 Nov 26 17:09:14 crc kubenswrapper[5010]: I1126 17:09:14.388344 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"43cbb13b-0c00-499f-96ee-6e815ce62895","Type":"ContainerDied","Data":"800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05"} Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.958205 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.969108 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ctxt\" (UniqueName: \"kubernetes.io/projected/43cbb13b-0c00-499f-96ee-6e815ce62895-kube-api-access-5ctxt\") pod \"43cbb13b-0c00-499f-96ee-6e815ce62895\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.969817 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43cbb13b-0c00-499f-96ee-6e815ce62895-logs\") pod \"43cbb13b-0c00-499f-96ee-6e815ce62895\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.969841 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-combined-ca-bundle\") pod \"43cbb13b-0c00-499f-96ee-6e815ce62895\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.969915 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-config-data\") pod \"43cbb13b-0c00-499f-96ee-6e815ce62895\" (UID: \"43cbb13b-0c00-499f-96ee-6e815ce62895\") " Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.970458 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43cbb13b-0c00-499f-96ee-6e815ce62895-logs" (OuterVolumeSpecName: "logs") pod "43cbb13b-0c00-499f-96ee-6e815ce62895" (UID: "43cbb13b-0c00-499f-96ee-6e815ce62895"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:09:16 crc kubenswrapper[5010]: I1126 17:09:16.979285 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43cbb13b-0c00-499f-96ee-6e815ce62895-kube-api-access-5ctxt" (OuterVolumeSpecName: "kube-api-access-5ctxt") pod "43cbb13b-0c00-499f-96ee-6e815ce62895" (UID: "43cbb13b-0c00-499f-96ee-6e815ce62895"). InnerVolumeSpecName "kube-api-access-5ctxt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.020825 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-config-data" (OuterVolumeSpecName: "config-data") pod "43cbb13b-0c00-499f-96ee-6e815ce62895" (UID: "43cbb13b-0c00-499f-96ee-6e815ce62895"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.026029 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43cbb13b-0c00-499f-96ee-6e815ce62895" (UID: "43cbb13b-0c00-499f-96ee-6e815ce62895"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.072222 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ctxt\" (UniqueName: \"kubernetes.io/projected/43cbb13b-0c00-499f-96ee-6e815ce62895-kube-api-access-5ctxt\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.072259 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/43cbb13b-0c00-499f-96ee-6e815ce62895-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.072270 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.072279 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43cbb13b-0c00-499f-96ee-6e815ce62895-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.421840 5010 generic.go:334] "Generic (PLEG): container finished" podID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerID="5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52" exitCode=0 Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.421915 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.421942 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"43cbb13b-0c00-499f-96ee-6e815ce62895","Type":"ContainerDied","Data":"5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52"} Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.422534 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"43cbb13b-0c00-499f-96ee-6e815ce62895","Type":"ContainerDied","Data":"0b042cc709fb6a5fbdfa033fbd30b5875a445ae6ea54852e5f8483e97a92161b"} Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.422617 5010 scope.go:117] "RemoveContainer" containerID="5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.476055 5010 scope.go:117] "RemoveContainer" containerID="800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.517970 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.545324 5010 scope.go:117] "RemoveContainer" containerID="5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52" Nov 26 17:09:17 crc kubenswrapper[5010]: E1126 17:09:17.548105 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52\": container with ID starting with 5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52 not found: ID does not exist" containerID="5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.548139 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52"} err="failed to get container status \"5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52\": rpc error: code = NotFound desc = could not find container \"5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52\": container with ID starting with 5b148ed3c694da3bdb9a990932b7a9a3ef78365bc98ade17d8846a2a14540c52 not found: ID does not exist" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.548161 5010 scope.go:117] "RemoveContainer" containerID="800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05" Nov 26 17:09:17 crc kubenswrapper[5010]: E1126 17:09:17.580120 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05\": container with ID starting with 800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05 not found: ID does not exist" containerID="800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.580173 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05"} err="failed to get container status \"800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05\": rpc error: code = NotFound desc = could not find container \"800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05\": container with ID starting with 
800529e1e2aae61d2423ce5a1bbe79ba062bdd698c74d1967fe971335ddfcf05 not found: ID does not exist" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.590659 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.615782 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 17:09:17 crc kubenswrapper[5010]: E1126 17:09:17.616353 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-log" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.616381 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-log" Nov 26 17:09:17 crc kubenswrapper[5010]: E1126 17:09:17.616413 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-api" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.616420 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-api" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.616600 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-log" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.616624 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" containerName="nova-api-api" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.617649 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.620023 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.620227 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.620340 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.635178 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.796333 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.796406 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkdcm\" (UniqueName: \"kubernetes.io/projected/d552405f-057e-416b-9540-bf0f0f0d2b7b-kube-api-access-qkdcm\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.796763 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-config-data\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 
17:09:17.797042 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.797100 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.797151 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d552405f-057e-416b-9540-bf0f0f0d2b7b-logs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.898287 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.898335 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.898363 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d552405f-057e-416b-9540-bf0f0f0d2b7b-logs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.898405 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.898427 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkdcm\" (UniqueName: \"kubernetes.io/projected/d552405f-057e-416b-9540-bf0f0f0d2b7b-kube-api-access-qkdcm\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.898478 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-config-data\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.899423 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d552405f-057e-416b-9540-bf0f0f0d2b7b-logs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 
17:09:17.904390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.906054 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-public-tls-certs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.908552 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-internal-tls-certs\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.911082 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43cbb13b-0c00-499f-96ee-6e815ce62895" path="/var/lib/kubelet/pods/43cbb13b-0c00-499f-96ee-6e815ce62895/volumes" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.923605 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-config-data\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.933083 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkdcm\" (UniqueName: \"kubernetes.io/projected/d552405f-057e-416b-9540-bf0f0f0d2b7b-kube-api-access-qkdcm\") pod \"nova-api-0\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " pod="openstack/nova-api-0" Nov 26 17:09:17 crc kubenswrapper[5010]: I1126 17:09:17.933883 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 26 17:09:18 crc kubenswrapper[5010]: I1126 17:09:18.459588 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 17:09:19 crc kubenswrapper[5010]: I1126 17:09:19.453403 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d552405f-057e-416b-9540-bf0f0f0d2b7b","Type":"ContainerStarted","Data":"1294c1a84215055821e00cdaa63179c1e469c4adae2aadcbb6d8a0fbdbf15af3"} Nov 26 17:09:19 crc kubenswrapper[5010]: I1126 17:09:19.454037 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d552405f-057e-416b-9540-bf0f0f0d2b7b","Type":"ContainerStarted","Data":"6708c1e4535a7b8f472b85b0cbb474bc683659dd9f4915f1d64fa4e939504f05"} Nov 26 17:09:19 crc kubenswrapper[5010]: I1126 17:09:19.454061 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d552405f-057e-416b-9540-bf0f0f0d2b7b","Type":"ContainerStarted","Data":"1b02597722483ce704776eb3a5d982f9ae0ca278a059d0b3d56b16f565cdd06f"} Nov 26 17:09:19 crc kubenswrapper[5010]: I1126 17:09:19.507983 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.507952404 podStartE2EDuration="2.507952404s" podCreationTimestamp="2025-11-26 17:09:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:09:19.495411972 +0000 UTC m=+6180.286129160" watchObservedRunningTime="2025-11-26 17:09:19.507952404 +0000 UTC m=+6180.298669592" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.139095 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.221735 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-ddc4c876c-vrnsj"] Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.221974 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerName="dnsmasq-dns" containerID="cri-o://b22bbacce62f1ef27309c00b0e5589c6fcc2a2b403c687041c17075169e515f8" gracePeriod=10 Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.478202 5010 generic.go:334] "Generic (PLEG): container finished" podID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerID="b22bbacce62f1ef27309c00b0e5589c6fcc2a2b403c687041c17075169e515f8" exitCode=0 Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.478390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" event={"ID":"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e","Type":"ContainerDied","Data":"b22bbacce62f1ef27309c00b0e5589c6fcc2a2b403c687041c17075169e515f8"} Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.672726 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.776751 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-248zx\" (UniqueName: \"kubernetes.io/projected/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-kube-api-access-248zx\") pod \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.776940 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-dns-svc\") pod \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.776971 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-nb\") pod \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.777127 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-sb\") pod \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.777152 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-config\") pod \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\" (UID: \"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e\") " Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.790097 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-kube-api-access-248zx" (OuterVolumeSpecName: "kube-api-access-248zx") pod "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" (UID: "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e"). InnerVolumeSpecName "kube-api-access-248zx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.830232 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" (UID: "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.833613 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" (UID: "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.842631 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" (UID: "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.842975 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-config" (OuterVolumeSpecName: "config") pod "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" (UID: "a14ea7c7-8798-4809-bc62-87e9bc5a2e5e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.879737 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-248zx\" (UniqueName: \"kubernetes.io/projected/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-kube-api-access-248zx\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.879771 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.879782 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.879794 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:21 crc kubenswrapper[5010]: I1126 17:09:21.879802 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:09:22 crc kubenswrapper[5010]: I1126 17:09:22.488616 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" event={"ID":"a14ea7c7-8798-4809-bc62-87e9bc5a2e5e","Type":"ContainerDied","Data":"4b6474994318eeccb3b23658bff61e1d851e96136275acc79662a5b6fc4080d4"} Nov 26 17:09:22 crc kubenswrapper[5010]: I1126 17:09:22.488691 5010 scope.go:117] "RemoveContainer" containerID="b22bbacce62f1ef27309c00b0e5589c6fcc2a2b403c687041c17075169e515f8" Nov 26 17:09:22 crc kubenswrapper[5010]: I1126 17:09:22.488742 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-ddc4c876c-vrnsj" Nov 26 17:09:22 crc kubenswrapper[5010]: I1126 17:09:22.511102 5010 scope.go:117] "RemoveContainer" containerID="73635e4fa68e1d7e76456bcd981ed5cd020f58a9595d23de8c7b72c0c21b87f0" Nov 26 17:09:22 crc kubenswrapper[5010]: I1126 17:09:22.521581 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-ddc4c876c-vrnsj"] Nov 26 17:09:22 crc kubenswrapper[5010]: I1126 17:09:22.529977 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-ddc4c876c-vrnsj"] Nov 26 17:09:23 crc kubenswrapper[5010]: I1126 17:09:23.914478 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" path="/var/lib/kubelet/pods/a14ea7c7-8798-4809-bc62-87e9bc5a2e5e/volumes" Nov 26 17:09:27 crc kubenswrapper[5010]: I1126 17:09:27.934908 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 17:09:27 crc kubenswrapper[5010]: I1126 17:09:27.935760 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 17:09:28 crc kubenswrapper[5010]: I1126 17:09:28.952287 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.112:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 17:09:28 crc kubenswrapper[5010]: I1126 17:09:28.952319 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.112:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 17:09:30 crc kubenswrapper[5010]: I1126 17:09:30.066742 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-gwm66"] Nov 26 17:09:30 crc kubenswrapper[5010]: I1126 17:09:30.077166 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-d61b-account-create-update-hprj5"] Nov 26 17:09:30 crc kubenswrapper[5010]: I1126 17:09:30.087927 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-gwm66"] Nov 26 17:09:30 crc kubenswrapper[5010]: I1126 17:09:30.096744 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-d61b-account-create-update-hprj5"] Nov 26 17:09:31 crc kubenswrapper[5010]: I1126 17:09:31.902531 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38d78a1f-0bd8-415c-a6b6-1158112ec0d9" path="/var/lib/kubelet/pods/38d78a1f-0bd8-415c-a6b6-1158112ec0d9/volumes" Nov 26 17:09:31 crc kubenswrapper[5010]: I1126 17:09:31.903637 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8ee05ae-14ec-43eb-930d-69c06e67a4d5" path="/var/lib/kubelet/pods/f8ee05ae-14ec-43eb-930d-69c06e67a4d5/volumes" Nov 26 17:09:37 crc kubenswrapper[5010]: I1126 17:09:37.944061 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 17:09:37 crc kubenswrapper[5010]: I1126 17:09:37.945256 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 17:09:37 crc kubenswrapper[5010]: I1126 17:09:37.946840 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 
17:09:37 crc kubenswrapper[5010]: I1126 17:09:37.955882 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 17:09:38 crc kubenswrapper[5010]: I1126 17:09:38.681334 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 17:09:38 crc kubenswrapper[5010]: I1126 17:09:38.691667 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 17:09:40 crc kubenswrapper[5010]: I1126 17:09:40.035140 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-bffnt"] Nov 26 17:09:40 crc kubenswrapper[5010]: I1126 17:09:40.044008 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-bffnt"] Nov 26 17:09:41 crc kubenswrapper[5010]: I1126 17:09:41.906190 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cf03ab2-653c-44b2-b9f8-6ad3de0800ff" path="/var/lib/kubelet/pods/3cf03ab2-653c-44b2-b9f8-6ad3de0800ff/volumes" Nov 26 17:09:54 crc kubenswrapper[5010]: I1126 17:09:54.056513 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-sjln8"] Nov 26 17:09:54 crc kubenswrapper[5010]: I1126 17:09:54.070490 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-sjln8"] Nov 26 17:09:55 crc kubenswrapper[5010]: I1126 17:09:55.912497 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ae3a95e-b0ee-4483-b4e2-86f2824386d2" path="/var/lib/kubelet/pods/5ae3a95e-b0ee-4483-b4e2-86f2824386d2/volumes" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.114378 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-w9882"] Nov 26 17:09:57 crc kubenswrapper[5010]: E1126 17:09:57.115001 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerName="init" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.115015 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerName="init" Nov 26 17:09:57 crc kubenswrapper[5010]: E1126 17:09:57.115023 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerName="dnsmasq-dns" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.115030 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerName="dnsmasq-dns" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.115212 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a14ea7c7-8798-4809-bc62-87e9bc5a2e5e" containerName="dnsmasq-dns" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.115890 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.120542 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.120901 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-s798v" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.121073 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.145585 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w9882"] Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206446 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jdmm\" (UniqueName: \"kubernetes.io/projected/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-kube-api-access-2jdmm\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206516 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-run-ovn\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206558 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-ovn-controller-tls-certs\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206601 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-scripts\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206617 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-combined-ca-bundle\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206634 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-run\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.206730 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-log-ovn\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.269690 5010 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-pbsm5"] Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.271572 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.291535 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-pbsm5"] Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309041 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-log-ovn\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309133 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jdmm\" (UniqueName: \"kubernetes.io/projected/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-kube-api-access-2jdmm\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309197 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-run-ovn\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309258 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-ovn-controller-tls-certs\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309333 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-scripts\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309362 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-combined-ca-bundle\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309397 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-run\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309764 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-run-ovn\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309856 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" 
(UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-log-ovn\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.309881 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-var-run\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.312139 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-scripts\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.318107 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-combined-ca-bundle\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.331521 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jdmm\" (UniqueName: \"kubernetes.io/projected/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-kube-api-access-2jdmm\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.334660 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/30cb9d89-279d-4bb8-bd1b-81e1dd58368a-ovn-controller-tls-certs\") pod \"ovn-controller-w9882\" (UID: \"30cb9d89-279d-4bb8-bd1b-81e1dd58368a\") " pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.413329 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwpvh\" (UniqueName: \"kubernetes.io/projected/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-kube-api-access-nwpvh\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.413403 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-log\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.413448 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-etc-ovs\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.413468 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-scripts\") pod \"ovn-controller-ovs-pbsm5\" (UID: 
\"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.413687 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-run\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.413774 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-lib\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.442472 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w9882" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517213 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-log\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517579 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-log\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517625 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-etc-ovs\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517646 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-scripts\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517728 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-run\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517749 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-etc-ovs\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517754 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-lib\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " 
pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517867 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-lib\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517947 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-var-run\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.517969 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwpvh\" (UniqueName: \"kubernetes.io/projected/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-kube-api-access-nwpvh\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.520680 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-scripts\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.540008 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwpvh\" (UniqueName: \"kubernetes.io/projected/dfa9a474-ac55-432d-9f63-9b6d4daa9af5-kube-api-access-nwpvh\") pod \"ovn-controller-ovs-pbsm5\" (UID: \"dfa9a474-ac55-432d-9f63-9b6d4daa9af5\") " pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.585925 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:09:57 crc kubenswrapper[5010]: I1126 17:09:57.950663 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w9882"] Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.486013 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-pbsm5"] Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.682662 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-49g6w"] Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.684319 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.687213 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.701306 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-49g6w"] Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.860782 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2bce34e3-639b-4cbb-97bb-5edc1650ad69-ovn-rundir\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.861174 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bce34e3-639b-4cbb-97bb-5edc1650ad69-config\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.861218 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwj4t\" (UniqueName: \"kubernetes.io/projected/2bce34e3-639b-4cbb-97bb-5edc1650ad69-kube-api-access-bwj4t\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.861256 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bce34e3-639b-4cbb-97bb-5edc1650ad69-combined-ca-bundle\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.861297 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bce34e3-639b-4cbb-97bb-5edc1650ad69-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.861352 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2bce34e3-639b-4cbb-97bb-5edc1650ad69-ovs-rundir\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.894174 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pbsm5" event={"ID":"dfa9a474-ac55-432d-9f63-9b6d4daa9af5","Type":"ContainerStarted","Data":"60d351781841a13044fbf024e9acb842e1e1c2f8c7b4efdfca05ed8df2af9a50"} Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.894213 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pbsm5" event={"ID":"dfa9a474-ac55-432d-9f63-9b6d4daa9af5","Type":"ContainerStarted","Data":"eeb323e6e8a63c99aa9919134fafb26d624e66a15e3ecbd21b96fde08818bdf6"} Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.897406 5010 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w9882" event={"ID":"30cb9d89-279d-4bb8-bd1b-81e1dd58368a","Type":"ContainerStarted","Data":"3a07575ee8f7862fe5e276a6f6ab6db4a980377f2c0aed83149d0a929dad6d9a"} Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.897457 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w9882" event={"ID":"30cb9d89-279d-4bb8-bd1b-81e1dd58368a","Type":"ContainerStarted","Data":"997de799f5cd0f8951ba56f4427619b716a0c082baa220f55125b8d3d96f2c31"} Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.897561 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-w9882" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.957177 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-w9882" podStartSLOduration=1.957153254 podStartE2EDuration="1.957153254s" podCreationTimestamp="2025-11-26 17:09:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:09:58.936977852 +0000 UTC m=+6219.727695000" watchObservedRunningTime="2025-11-26 17:09:58.957153254 +0000 UTC m=+6219.747870402" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.965111 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2bce34e3-639b-4cbb-97bb-5edc1650ad69-ovn-rundir\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.965173 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bce34e3-639b-4cbb-97bb-5edc1650ad69-config\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.965237 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwj4t\" (UniqueName: \"kubernetes.io/projected/2bce34e3-639b-4cbb-97bb-5edc1650ad69-kube-api-access-bwj4t\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.965289 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bce34e3-639b-4cbb-97bb-5edc1650ad69-combined-ca-bundle\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.965345 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bce34e3-639b-4cbb-97bb-5edc1650ad69-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.965473 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2bce34e3-639b-4cbb-97bb-5edc1650ad69-ovs-rundir\") pod \"ovn-controller-metrics-49g6w\" (UID: 
\"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.967103 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/2bce34e3-639b-4cbb-97bb-5edc1650ad69-ovs-rundir\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.967248 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/2bce34e3-639b-4cbb-97bb-5edc1650ad69-ovn-rundir\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.968639 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bce34e3-639b-4cbb-97bb-5edc1650ad69-config\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.978654 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2bce34e3-639b-4cbb-97bb-5edc1650ad69-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.978835 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2bce34e3-639b-4cbb-97bb-5edc1650ad69-combined-ca-bundle\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:58 crc kubenswrapper[5010]: I1126 17:09:58.989076 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwj4t\" (UniqueName: \"kubernetes.io/projected/2bce34e3-639b-4cbb-97bb-5edc1650ad69-kube-api-access-bwj4t\") pod \"ovn-controller-metrics-49g6w\" (UID: \"2bce34e3-639b-4cbb-97bb-5edc1650ad69\") " pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.019599 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-49g6w" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.093800 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-create-t7vkz"] Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.095407 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.103275 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-t7vkz"] Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.278266 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-operator-scripts\") pod \"octavia-db-create-t7vkz\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.281273 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6wd6\" (UniqueName: \"kubernetes.io/projected/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-kube-api-access-n6wd6\") pod \"octavia-db-create-t7vkz\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.383122 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-operator-scripts\") pod \"octavia-db-create-t7vkz\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.383191 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6wd6\" (UniqueName: \"kubernetes.io/projected/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-kube-api-access-n6wd6\") pod \"octavia-db-create-t7vkz\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.383916 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-operator-scripts\") pod \"octavia-db-create-t7vkz\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.405410 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6wd6\" (UniqueName: \"kubernetes.io/projected/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-kube-api-access-n6wd6\") pod \"octavia-db-create-t7vkz\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.429182 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-t7vkz" Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.509585 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-49g6w"] Nov 26 17:09:59 crc kubenswrapper[5010]: W1126 17:09:59.922114 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c26ac4d_4244_4540_8a5e_8edc62cd6db7.slice/crio-fe49d5893d2de3262e86a31741277e0c7cf3db41062ee32df88e0fb28cf9fa08 WatchSource:0}: Error finding container fe49d5893d2de3262e86a31741277e0c7cf3db41062ee32df88e0fb28cf9fa08: Status 404 returned error can't find the container with id fe49d5893d2de3262e86a31741277e0c7cf3db41062ee32df88e0fb28cf9fa08 Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.930654 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-create-t7vkz"] Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.967173 5010 generic.go:334] "Generic (PLEG): container finished" podID="dfa9a474-ac55-432d-9f63-9b6d4daa9af5" containerID="60d351781841a13044fbf024e9acb842e1e1c2f8c7b4efdfca05ed8df2af9a50" exitCode=0 Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.967273 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pbsm5" event={"ID":"dfa9a474-ac55-432d-9f63-9b6d4daa9af5","Type":"ContainerDied","Data":"60d351781841a13044fbf024e9acb842e1e1c2f8c7b4efdfca05ed8df2af9a50"} Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.972704 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-49g6w" event={"ID":"2bce34e3-639b-4cbb-97bb-5edc1650ad69","Type":"ContainerStarted","Data":"0d34bb203555a2a5048b5da301b7f347f026f04bd78a5b87224c6a0b46bf6609"} Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.972766 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-49g6w" event={"ID":"2bce34e3-639b-4cbb-97bb-5edc1650ad69","Type":"ContainerStarted","Data":"9d1579c06dbc6110fe1abba67577ee8a7aea3aa551ec914ae2579eb749ed005f"} Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.986777 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-60a6-account-create-update-bxfcp"] Nov 26 17:09:59 crc kubenswrapper[5010]: I1126 17:09:59.988408 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:09:59.995817 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-db-secret" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.010760 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-60a6-account-create-update-bxfcp"] Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.039279 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-49g6w" podStartSLOduration=2.039260849 podStartE2EDuration="2.039260849s" podCreationTimestamp="2025-11-26 17:09:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:10:00.022760889 +0000 UTC m=+6220.813478047" watchObservedRunningTime="2025-11-26 17:10:00.039260849 +0000 UTC m=+6220.829977987" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.097724 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0be84c79-d2c2-4633-8f59-bcf7084e8101-operator-scripts\") pod \"octavia-60a6-account-create-update-bxfcp\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.098018 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wcp67\" (UniqueName: \"kubernetes.io/projected/0be84c79-d2c2-4633-8f59-bcf7084e8101-kube-api-access-wcp67\") pod \"octavia-60a6-account-create-update-bxfcp\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.202223 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wcp67\" (UniqueName: \"kubernetes.io/projected/0be84c79-d2c2-4633-8f59-bcf7084e8101-kube-api-access-wcp67\") pod \"octavia-60a6-account-create-update-bxfcp\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.202617 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0be84c79-d2c2-4633-8f59-bcf7084e8101-operator-scripts\") pod \"octavia-60a6-account-create-update-bxfcp\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.206026 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0be84c79-d2c2-4633-8f59-bcf7084e8101-operator-scripts\") pod \"octavia-60a6-account-create-update-bxfcp\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.236017 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wcp67\" (UniqueName: \"kubernetes.io/projected/0be84c79-d2c2-4633-8f59-bcf7084e8101-kube-api-access-wcp67\") pod \"octavia-60a6-account-create-update-bxfcp\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 
17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.318810 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.844028 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-60a6-account-create-update-bxfcp"] Nov 26 17:10:00 crc kubenswrapper[5010]: W1126 17:10:00.849787 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0be84c79_d2c2_4633_8f59_bcf7084e8101.slice/crio-79458dd8ca45c0ea3a994cd2c166a88967c22785cdc79ed0698651a0052925c9 WatchSource:0}: Error finding container 79458dd8ca45c0ea3a994cd2c166a88967c22785cdc79ed0698651a0052925c9: Status 404 returned error can't find the container with id 79458dd8ca45c0ea3a994cd2c166a88967c22785cdc79ed0698651a0052925c9 Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.985601 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-60a6-account-create-update-bxfcp" event={"ID":"0be84c79-d2c2-4633-8f59-bcf7084e8101","Type":"ContainerStarted","Data":"79458dd8ca45c0ea3a994cd2c166a88967c22785cdc79ed0698651a0052925c9"} Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.989442 5010 generic.go:334] "Generic (PLEG): container finished" podID="9c26ac4d-4244-4540-8a5e-8edc62cd6db7" containerID="f52930f175673c8f873764aa5f89b2931a37647312831370b490d61e98b56c63" exitCode=0 Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.989524 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-t7vkz" event={"ID":"9c26ac4d-4244-4540-8a5e-8edc62cd6db7","Type":"ContainerDied","Data":"f52930f175673c8f873764aa5f89b2931a37647312831370b490d61e98b56c63"} Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.989555 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-t7vkz" event={"ID":"9c26ac4d-4244-4540-8a5e-8edc62cd6db7","Type":"ContainerStarted","Data":"fe49d5893d2de3262e86a31741277e0c7cf3db41062ee32df88e0fb28cf9fa08"} Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.995921 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pbsm5" event={"ID":"dfa9a474-ac55-432d-9f63-9b6d4daa9af5","Type":"ContainerStarted","Data":"1499bade0d7acc72f65debec5e0b86f92f4e6e685b3df5c344b1f80c69fa0676"} Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.996003 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-pbsm5" event={"ID":"dfa9a474-ac55-432d-9f63-9b6d4daa9af5","Type":"ContainerStarted","Data":"6078c9a1ce378d73a628ff4c5de62653043d13b1f953d98b906cdafbaf9cd8b2"} Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.996054 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:10:00 crc kubenswrapper[5010]: I1126 17:10:00.996086 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:10:01 crc kubenswrapper[5010]: I1126 17:10:01.036524 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-pbsm5" podStartSLOduration=4.036504615 podStartE2EDuration="4.036504615s" podCreationTimestamp="2025-11-26 17:09:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:10:01.032760412 +0000 UTC m=+6221.823477590" 
watchObservedRunningTime="2025-11-26 17:10:01.036504615 +0000 UTC m=+6221.827221763" Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.004991 5010 generic.go:334] "Generic (PLEG): container finished" podID="0be84c79-d2c2-4633-8f59-bcf7084e8101" containerID="d04f99ef320f57f5bcff4a47246d3e498ce9a44408c4011c353a333a7c3502d3" exitCode=0 Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.005164 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-60a6-account-create-update-bxfcp" event={"ID":"0be84c79-d2c2-4633-8f59-bcf7084e8101","Type":"ContainerDied","Data":"d04f99ef320f57f5bcff4a47246d3e498ce9a44408c4011c353a333a7c3502d3"} Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.378638 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-create-t7vkz" Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.548683 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-operator-scripts\") pod \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.548854 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6wd6\" (UniqueName: \"kubernetes.io/projected/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-kube-api-access-n6wd6\") pod \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\" (UID: \"9c26ac4d-4244-4540-8a5e-8edc62cd6db7\") " Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.549215 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9c26ac4d-4244-4540-8a5e-8edc62cd6db7" (UID: "9c26ac4d-4244-4540-8a5e-8edc62cd6db7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.549565 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.554609 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-kube-api-access-n6wd6" (OuterVolumeSpecName: "kube-api-access-n6wd6") pod "9c26ac4d-4244-4540-8a5e-8edc62cd6db7" (UID: "9c26ac4d-4244-4540-8a5e-8edc62cd6db7"). InnerVolumeSpecName "kube-api-access-n6wd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:10:02 crc kubenswrapper[5010]: I1126 17:10:02.651166 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6wd6\" (UniqueName: \"kubernetes.io/projected/9c26ac4d-4244-4540-8a5e-8edc62cd6db7-kube-api-access-n6wd6\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.025840 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-create-t7vkz" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.025852 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-create-t7vkz" event={"ID":"9c26ac4d-4244-4540-8a5e-8edc62cd6db7","Type":"ContainerDied","Data":"fe49d5893d2de3262e86a31741277e0c7cf3db41062ee32df88e0fb28cf9fa08"} Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.025926 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe49d5893d2de3262e86a31741277e0c7cf3db41062ee32df88e0fb28cf9fa08" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.463966 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.568116 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0be84c79-d2c2-4633-8f59-bcf7084e8101-operator-scripts\") pod \"0be84c79-d2c2-4633-8f59-bcf7084e8101\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.568500 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wcp67\" (UniqueName: \"kubernetes.io/projected/0be84c79-d2c2-4633-8f59-bcf7084e8101-kube-api-access-wcp67\") pod \"0be84c79-d2c2-4633-8f59-bcf7084e8101\" (UID: \"0be84c79-d2c2-4633-8f59-bcf7084e8101\") " Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.570238 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0be84c79-d2c2-4633-8f59-bcf7084e8101-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0be84c79-d2c2-4633-8f59-bcf7084e8101" (UID: "0be84c79-d2c2-4633-8f59-bcf7084e8101"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.572774 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0be84c79-d2c2-4633-8f59-bcf7084e8101-kube-api-access-wcp67" (OuterVolumeSpecName: "kube-api-access-wcp67") pod "0be84c79-d2c2-4633-8f59-bcf7084e8101" (UID: "0be84c79-d2c2-4633-8f59-bcf7084e8101"). InnerVolumeSpecName "kube-api-access-wcp67". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.671630 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0be84c79-d2c2-4633-8f59-bcf7084e8101-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:03 crc kubenswrapper[5010]: I1126 17:10:03.671755 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wcp67\" (UniqueName: \"kubernetes.io/projected/0be84c79-d2c2-4633-8f59-bcf7084e8101-kube-api-access-wcp67\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:04 crc kubenswrapper[5010]: I1126 17:10:04.040648 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-60a6-account-create-update-bxfcp" event={"ID":"0be84c79-d2c2-4633-8f59-bcf7084e8101","Type":"ContainerDied","Data":"79458dd8ca45c0ea3a994cd2c166a88967c22785cdc79ed0698651a0052925c9"} Nov 26 17:10:04 crc kubenswrapper[5010]: I1126 17:10:04.040685 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79458dd8ca45c0ea3a994cd2c166a88967c22785cdc79ed0698651a0052925c9" Nov 26 17:10:04 crc kubenswrapper[5010]: I1126 17:10:04.040686 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-60a6-account-create-update-bxfcp" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.708803 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-persistence-db-create-6dnnh"] Nov 26 17:10:05 crc kubenswrapper[5010]: E1126 17:10:05.709820 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c26ac4d-4244-4540-8a5e-8edc62cd6db7" containerName="mariadb-database-create" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.709842 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c26ac4d-4244-4540-8a5e-8edc62cd6db7" containerName="mariadb-database-create" Nov 26 17:10:05 crc kubenswrapper[5010]: E1126 17:10:05.709871 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0be84c79-d2c2-4633-8f59-bcf7084e8101" containerName="mariadb-account-create-update" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.709881 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0be84c79-d2c2-4633-8f59-bcf7084e8101" containerName="mariadb-account-create-update" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.710151 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c26ac4d-4244-4540-8a5e-8edc62cd6db7" containerName="mariadb-database-create" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.710189 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0be84c79-d2c2-4633-8f59-bcf7084e8101" containerName="mariadb-account-create-update" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.711149 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.781874 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-6dnnh"] Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.818007 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8687aeb-d22b-4f26-bbb8-24728c45ae09-operator-scripts\") pod \"octavia-persistence-db-create-6dnnh\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.818269 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbzf7\" (UniqueName: \"kubernetes.io/projected/e8687aeb-d22b-4f26-bbb8-24728c45ae09-kube-api-access-nbzf7\") pod \"octavia-persistence-db-create-6dnnh\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.920602 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8687aeb-d22b-4f26-bbb8-24728c45ae09-operator-scripts\") pod \"octavia-persistence-db-create-6dnnh\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.920772 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbzf7\" (UniqueName: \"kubernetes.io/projected/e8687aeb-d22b-4f26-bbb8-24728c45ae09-kube-api-access-nbzf7\") pod \"octavia-persistence-db-create-6dnnh\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.921492 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8687aeb-d22b-4f26-bbb8-24728c45ae09-operator-scripts\") pod \"octavia-persistence-db-create-6dnnh\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:05 crc kubenswrapper[5010]: I1126 17:10:05.948260 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbzf7\" (UniqueName: \"kubernetes.io/projected/e8687aeb-d22b-4f26-bbb8-24728c45ae09-kube-api-access-nbzf7\") pod \"octavia-persistence-db-create-6dnnh\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:06 crc kubenswrapper[5010]: I1126 17:10:06.060843 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:06 crc kubenswrapper[5010]: I1126 17:10:06.521120 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-persistence-db-create-6dnnh"] Nov 26 17:10:06 crc kubenswrapper[5010]: I1126 17:10:06.736486 5010 scope.go:117] "RemoveContainer" containerID="9541f30bfd2a5bc53a5577924e54d858d24f1d77e4911ec177d4d853416cd676" Nov 26 17:10:06 crc kubenswrapper[5010]: I1126 17:10:06.782475 5010 scope.go:117] "RemoveContainer" containerID="c936a1ea7e79564b5a3b3e925e771dc887132c9bc0eb90f35c5462c5c1e68aea" Nov 26 17:10:06 crc kubenswrapper[5010]: I1126 17:10:06.821570 5010 scope.go:117] "RemoveContainer" containerID="aab7743c11c6ef865798bf865aefa47f7eeffc1b0b85d86acf5f153684df6ad5" Nov 26 17:10:06 crc kubenswrapper[5010]: I1126 17:10:06.851070 5010 scope.go:117] "RemoveContainer" containerID="0446aebb1fa399da149bd9602b95ef5ff3303b9b9216da9fd26093ae055b58ea" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.033398 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-1499-account-create-update-zwbjm"] Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.035388 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.037482 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-persistence-db-secret" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.042404 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-1499-account-create-update-zwbjm"] Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.070082 5010 generic.go:334] "Generic (PLEG): container finished" podID="e8687aeb-d22b-4f26-bbb8-24728c45ae09" containerID="ffea1240c5a23741022f28dcbd1610ee339cd5814551280648386730b18f812d" exitCode=0 Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.070438 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-6dnnh" event={"ID":"e8687aeb-d22b-4f26-bbb8-24728c45ae09","Type":"ContainerDied","Data":"ffea1240c5a23741022f28dcbd1610ee339cd5814551280648386730b18f812d"} Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.070466 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-6dnnh" event={"ID":"e8687aeb-d22b-4f26-bbb8-24728c45ae09","Type":"ContainerStarted","Data":"46d7d420e0e4dfe22fc232b5cd5cf5286371d84349d89181069039176a1c529a"} Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.156007 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fvxk5\" (UniqueName: \"kubernetes.io/projected/371275a0-39e1-4c5c-a68a-44c3e50d5998-kube-api-access-fvxk5\") pod \"octavia-1499-account-create-update-zwbjm\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.156057 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/371275a0-39e1-4c5c-a68a-44c3e50d5998-operator-scripts\") pod \"octavia-1499-account-create-update-zwbjm\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.257348 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fvxk5\" (UniqueName: \"kubernetes.io/projected/371275a0-39e1-4c5c-a68a-44c3e50d5998-kube-api-access-fvxk5\") pod \"octavia-1499-account-create-update-zwbjm\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.257412 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/371275a0-39e1-4c5c-a68a-44c3e50d5998-operator-scripts\") pod \"octavia-1499-account-create-update-zwbjm\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.258174 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/371275a0-39e1-4c5c-a68a-44c3e50d5998-operator-scripts\") pod \"octavia-1499-account-create-update-zwbjm\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.275447 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fvxk5\" (UniqueName: \"kubernetes.io/projected/371275a0-39e1-4c5c-a68a-44c3e50d5998-kube-api-access-fvxk5\") pod \"octavia-1499-account-create-update-zwbjm\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.405477 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:07 crc kubenswrapper[5010]: I1126 17:10:07.854188 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-1499-account-create-update-zwbjm"] Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.081286 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1499-account-create-update-zwbjm" event={"ID":"371275a0-39e1-4c5c-a68a-44c3e50d5998","Type":"ContainerStarted","Data":"24eb302e4e2aa089f09b565339ca8e21b763633539189a951f7dfbe6eb80f11e"} Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.081324 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1499-account-create-update-zwbjm" event={"ID":"371275a0-39e1-4c5c-a68a-44c3e50d5998","Type":"ContainerStarted","Data":"179a800859c9ec888cb88474cc8e75c2b8463c247fc7feebc79268b7e7b8423e"} Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.122819 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-1499-account-create-update-zwbjm" podStartSLOduration=1.12276477 podStartE2EDuration="1.12276477s" podCreationTimestamp="2025-11-26 17:10:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:10:08.121208162 +0000 UTC m=+6228.911925350" watchObservedRunningTime="2025-11-26 17:10:08.12276477 +0000 UTC m=+6228.913481928" Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.569144 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.586825 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nbzf7\" (UniqueName: \"kubernetes.io/projected/e8687aeb-d22b-4f26-bbb8-24728c45ae09-kube-api-access-nbzf7\") pod \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.587112 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8687aeb-d22b-4f26-bbb8-24728c45ae09-operator-scripts\") pod \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\" (UID: \"e8687aeb-d22b-4f26-bbb8-24728c45ae09\") " Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.587607 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8687aeb-d22b-4f26-bbb8-24728c45ae09-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8687aeb-d22b-4f26-bbb8-24728c45ae09" (UID: "e8687aeb-d22b-4f26-bbb8-24728c45ae09"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.587823 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8687aeb-d22b-4f26-bbb8-24728c45ae09-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.592807 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8687aeb-d22b-4f26-bbb8-24728c45ae09-kube-api-access-nbzf7" (OuterVolumeSpecName: "kube-api-access-nbzf7") pod "e8687aeb-d22b-4f26-bbb8-24728c45ae09" (UID: "e8687aeb-d22b-4f26-bbb8-24728c45ae09"). InnerVolumeSpecName "kube-api-access-nbzf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:10:08 crc kubenswrapper[5010]: I1126 17:10:08.689680 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nbzf7\" (UniqueName: \"kubernetes.io/projected/e8687aeb-d22b-4f26-bbb8-24728c45ae09-kube-api-access-nbzf7\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:09 crc kubenswrapper[5010]: I1126 17:10:09.100270 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-persistence-db-create-6dnnh" event={"ID":"e8687aeb-d22b-4f26-bbb8-24728c45ae09","Type":"ContainerDied","Data":"46d7d420e0e4dfe22fc232b5cd5cf5286371d84349d89181069039176a1c529a"} Nov 26 17:10:09 crc kubenswrapper[5010]: I1126 17:10:09.101599 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46d7d420e0e4dfe22fc232b5cd5cf5286371d84349d89181069039176a1c529a" Nov 26 17:10:09 crc kubenswrapper[5010]: I1126 17:10:09.100322 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-persistence-db-create-6dnnh" Nov 26 17:10:09 crc kubenswrapper[5010]: I1126 17:10:09.102576 5010 generic.go:334] "Generic (PLEG): container finished" podID="371275a0-39e1-4c5c-a68a-44c3e50d5998" containerID="24eb302e4e2aa089f09b565339ca8e21b763633539189a951f7dfbe6eb80f11e" exitCode=0 Nov 26 17:10:09 crc kubenswrapper[5010]: I1126 17:10:09.102901 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1499-account-create-update-zwbjm" event={"ID":"371275a0-39e1-4c5c-a68a-44c3e50d5998","Type":"ContainerDied","Data":"24eb302e4e2aa089f09b565339ca8e21b763633539189a951f7dfbe6eb80f11e"} Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.514025 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.525538 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fvxk5\" (UniqueName: \"kubernetes.io/projected/371275a0-39e1-4c5c-a68a-44c3e50d5998-kube-api-access-fvxk5\") pod \"371275a0-39e1-4c5c-a68a-44c3e50d5998\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.525682 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/371275a0-39e1-4c5c-a68a-44c3e50d5998-operator-scripts\") pod \"371275a0-39e1-4c5c-a68a-44c3e50d5998\" (UID: \"371275a0-39e1-4c5c-a68a-44c3e50d5998\") " Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.526256 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/371275a0-39e1-4c5c-a68a-44c3e50d5998-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "371275a0-39e1-4c5c-a68a-44c3e50d5998" (UID: "371275a0-39e1-4c5c-a68a-44c3e50d5998"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.531501 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/371275a0-39e1-4c5c-a68a-44c3e50d5998-kube-api-access-fvxk5" (OuterVolumeSpecName: "kube-api-access-fvxk5") pod "371275a0-39e1-4c5c-a68a-44c3e50d5998" (UID: "371275a0-39e1-4c5c-a68a-44c3e50d5998"). InnerVolumeSpecName "kube-api-access-fvxk5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.627325 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fvxk5\" (UniqueName: \"kubernetes.io/projected/371275a0-39e1-4c5c-a68a-44c3e50d5998-kube-api-access-fvxk5\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:10 crc kubenswrapper[5010]: I1126 17:10:10.627357 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/371275a0-39e1-4c5c-a68a-44c3e50d5998-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:11 crc kubenswrapper[5010]: I1126 17:10:11.126757 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-1499-account-create-update-zwbjm" event={"ID":"371275a0-39e1-4c5c-a68a-44c3e50d5998","Type":"ContainerDied","Data":"179a800859c9ec888cb88474cc8e75c2b8463c247fc7feebc79268b7e7b8423e"} Nov 26 17:10:11 crc kubenswrapper[5010]: I1126 17:10:11.126808 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="179a800859c9ec888cb88474cc8e75c2b8463c247fc7feebc79268b7e7b8423e" Nov 26 17:10:11 crc kubenswrapper[5010]: I1126 17:10:11.126914 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-1499-account-create-update-zwbjm" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.857715 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-67c9d644dd-m7cx4"] Nov 26 17:10:12 crc kubenswrapper[5010]: E1126 17:10:12.871217 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="371275a0-39e1-4c5c-a68a-44c3e50d5998" containerName="mariadb-account-create-update" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.871256 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="371275a0-39e1-4c5c-a68a-44c3e50d5998" containerName="mariadb-account-create-update" Nov 26 17:10:12 crc kubenswrapper[5010]: E1126 17:10:12.871329 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8687aeb-d22b-4f26-bbb8-24728c45ae09" containerName="mariadb-database-create" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.871338 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8687aeb-d22b-4f26-bbb8-24728c45ae09" containerName="mariadb-database-create" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.872133 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="371275a0-39e1-4c5c-a68a-44c3e50d5998" containerName="mariadb-account-create-update" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.872204 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8687aeb-d22b-4f26-bbb8-24728c45ae09" containerName="mariadb-database-create" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.899936 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.904365 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-scripts" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.904906 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-octavia-dockercfg-l9fhm" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.905630 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-ovndbs" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.915346 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-api-config-data" Nov 26 17:10:12 crc kubenswrapper[5010]: I1126 17:10:12.967658 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-67c9d644dd-m7cx4"] Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.012245 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-config-data-merged\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.012292 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-ovndb-tls-certs\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.012313 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-combined-ca-bundle\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.012349 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-config-data\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.012372 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-scripts\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.012391 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-octavia-run\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.114163 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: 
\"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-config-data-merged\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.114472 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-ovndb-tls-certs\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.114640 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-config-data-merged\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.114502 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-combined-ca-bundle\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.115525 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-config-data\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.115559 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-scripts\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.115586 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-octavia-run\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.116063 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-octavia-run\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.121183 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-config-data\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.121574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-scripts\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: 
\"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.122302 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-ovndb-tls-certs\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.139454 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-combined-ca-bundle\") pod \"octavia-api-67c9d644dd-m7cx4\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.302274 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.887839 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-67c9d644dd-m7cx4"] Nov 26 17:10:13 crc kubenswrapper[5010]: W1126 17:10:13.889234 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod215d6716_e893_4fb1_846c_4538c12c888a.slice/crio-163aa2241ac7b0feb0b83380deb6e012c3bb643a31aa9b8b30d1c061309b9eaf WatchSource:0}: Error finding container 163aa2241ac7b0feb0b83380deb6e012c3bb643a31aa9b8b30d1c061309b9eaf: Status 404 returned error can't find the container with id 163aa2241ac7b0feb0b83380deb6e012c3bb643a31aa9b8b30d1c061309b9eaf Nov 26 17:10:13 crc kubenswrapper[5010]: I1126 17:10:13.892577 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:10:14 crc kubenswrapper[5010]: I1126 17:10:14.159782 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerStarted","Data":"163aa2241ac7b0feb0b83380deb6e012c3bb643a31aa9b8b30d1c061309b9eaf"} Nov 26 17:10:25 crc kubenswrapper[5010]: I1126 17:10:25.272054 5010 generic.go:334] "Generic (PLEG): container finished" podID="215d6716-e893-4fb1-846c-4538c12c888a" containerID="a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89" exitCode=0 Nov 26 17:10:25 crc kubenswrapper[5010]: I1126 17:10:25.272164 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerDied","Data":"a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89"} Nov 26 17:10:26 crc kubenswrapper[5010]: I1126 17:10:26.298537 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerStarted","Data":"67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369"} Nov 26 17:10:26 crc kubenswrapper[5010]: I1126 17:10:26.298912 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerStarted","Data":"425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282"} Nov 26 17:10:26 crc kubenswrapper[5010]: I1126 17:10:26.300162 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:26 crc kubenswrapper[5010]: I1126 17:10:26.300196 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:26 crc kubenswrapper[5010]: I1126 17:10:26.333522 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-67c9d644dd-m7cx4" podStartSLOduration=3.893957606 podStartE2EDuration="14.333503949s" podCreationTimestamp="2025-11-26 17:10:12 +0000 UTC" firstStartedPulling="2025-11-26 17:10:13.892291096 +0000 UTC m=+6234.683008244" lastFinishedPulling="2025-11-26 17:10:24.331837439 +0000 UTC m=+6245.122554587" observedRunningTime="2025-11-26 17:10:26.324746952 +0000 UTC m=+6247.115464140" watchObservedRunningTime="2025-11-26 17:10:26.333503949 +0000 UTC m=+6247.124221097" Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.493946 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-w9882" Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.649648 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.649928 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-pbsm5" Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.971444 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-w9882-config-jjsc6"] Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.972934 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.976359 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 26 17:10:32 crc kubenswrapper[5010]: I1126 17:10:32.982326 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w9882-config-jjsc6"] Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.054822 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-additional-scripts\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.055102 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pd9s\" (UniqueName: \"kubernetes.io/projected/37445cb2-a024-4623-99c8-afac9218712a-kube-api-access-9pd9s\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.055196 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.055320 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run-ovn\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.055420 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-scripts\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.055537 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-log-ovn\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.157810 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run-ovn\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.157858 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-scripts\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.157920 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-log-ovn\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.158007 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-additional-scripts\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.158032 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pd9s\" (UniqueName: \"kubernetes.io/projected/37445cb2-a024-4623-99c8-afac9218712a-kube-api-access-9pd9s\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.158239 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.158559 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"var-run\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.158600 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run-ovn\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.158845 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-log-ovn\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.159228 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-additional-scripts\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.160852 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-scripts\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.186953 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pd9s\" (UniqueName: \"kubernetes.io/projected/37445cb2-a024-4623-99c8-afac9218712a-kube-api-access-9pd9s\") pod \"ovn-controller-w9882-config-jjsc6\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.299679 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:33 crc kubenswrapper[5010]: I1126 17:10:33.839527 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-w9882-config-jjsc6"] Nov 26 17:10:33 crc kubenswrapper[5010]: W1126 17:10:33.841022 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37445cb2_a024_4623_99c8_afac9218712a.slice/crio-a177287799eb3687c8fea32f1e4027023b0ac2b5a2f5a6d46baf36e126209160 WatchSource:0}: Error finding container a177287799eb3687c8fea32f1e4027023b0ac2b5a2f5a6d46baf36e126209160: Status 404 returned error can't find the container with id a177287799eb3687c8fea32f1e4027023b0ac2b5a2f5a6d46baf36e126209160 Nov 26 17:10:34 crc kubenswrapper[5010]: I1126 17:10:34.442520 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w9882-config-jjsc6" event={"ID":"37445cb2-a024-4623-99c8-afac9218712a","Type":"ContainerStarted","Data":"12153338c30a6ab6210e5532050d36947e6fe2427df55189f3a9fbd8d0f3bea5"} Nov 26 17:10:34 crc kubenswrapper[5010]: I1126 17:10:34.443011 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w9882-config-jjsc6" event={"ID":"37445cb2-a024-4623-99c8-afac9218712a","Type":"ContainerStarted","Data":"a177287799eb3687c8fea32f1e4027023b0ac2b5a2f5a6d46baf36e126209160"} Nov 26 17:10:34 crc kubenswrapper[5010]: I1126 17:10:34.471450 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-w9882-config-jjsc6" podStartSLOduration=2.471429844 podStartE2EDuration="2.471429844s" podCreationTimestamp="2025-11-26 17:10:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:10:34.45879443 +0000 UTC m=+6255.249511618" watchObservedRunningTime="2025-11-26 17:10:34.471429844 +0000 UTC m=+6255.262146992" Nov 26 17:10:35 crc kubenswrapper[5010]: I1126 17:10:35.474310 5010 generic.go:334] "Generic (PLEG): container finished" podID="37445cb2-a024-4623-99c8-afac9218712a" containerID="12153338c30a6ab6210e5532050d36947e6fe2427df55189f3a9fbd8d0f3bea5" exitCode=0 Nov 26 17:10:35 crc kubenswrapper[5010]: I1126 17:10:35.474401 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w9882-config-jjsc6" event={"ID":"37445cb2-a024-4623-99c8-afac9218712a","Type":"ContainerDied","Data":"12153338c30a6ab6210e5532050d36947e6fe2427df55189f3a9fbd8d0f3bea5"} Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.911288 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.951575 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-additional-scripts\") pod \"37445cb2-a024-4623-99c8-afac9218712a\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.951773 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run-ovn\") pod \"37445cb2-a024-4623-99c8-afac9218712a\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.952056 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pd9s\" (UniqueName: \"kubernetes.io/projected/37445cb2-a024-4623-99c8-afac9218712a-kube-api-access-9pd9s\") pod \"37445cb2-a024-4623-99c8-afac9218712a\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.952347 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-scripts\") pod \"37445cb2-a024-4623-99c8-afac9218712a\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.954035 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-log-ovn\") pod \"37445cb2-a024-4623-99c8-afac9218712a\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.954089 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run\") pod \"37445cb2-a024-4623-99c8-afac9218712a\" (UID: \"37445cb2-a024-4623-99c8-afac9218712a\") " Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.953984 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "37445cb2-a024-4623-99c8-afac9218712a" (UID: "37445cb2-a024-4623-99c8-afac9218712a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.955752 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "37445cb2-a024-4623-99c8-afac9218712a" (UID: "37445cb2-a024-4623-99c8-afac9218712a"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.955822 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run" (OuterVolumeSpecName: "var-run") pod "37445cb2-a024-4623-99c8-afac9218712a" (UID: "37445cb2-a024-4623-99c8-afac9218712a"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.956365 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-scripts" (OuterVolumeSpecName: "scripts") pod "37445cb2-a024-4623-99c8-afac9218712a" (UID: "37445cb2-a024-4623-99c8-afac9218712a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.963727 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37445cb2-a024-4623-99c8-afac9218712a-kube-api-access-9pd9s" (OuterVolumeSpecName: "kube-api-access-9pd9s") pod "37445cb2-a024-4623-99c8-afac9218712a" (UID: "37445cb2-a024-4623-99c8-afac9218712a"). InnerVolumeSpecName "kube-api-access-9pd9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:10:36 crc kubenswrapper[5010]: I1126 17:10:36.968357 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "37445cb2-a024-4623-99c8-afac9218712a" (UID: "37445cb2-a024-4623-99c8-afac9218712a"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.057363 5010 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.057406 5010 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.057421 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pd9s\" (UniqueName: \"kubernetes.io/projected/37445cb2-a024-4623-99c8-afac9218712a-kube-api-access-9pd9s\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.057435 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/37445cb2-a024-4623-99c8-afac9218712a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.057446 5010 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.057457 5010 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/37445cb2-a024-4623-99c8-afac9218712a-var-run\") on node \"crc\" DevicePath \"\"" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.503009 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-w9882-config-jjsc6" event={"ID":"37445cb2-a024-4623-99c8-afac9218712a","Type":"ContainerDied","Data":"a177287799eb3687c8fea32f1e4027023b0ac2b5a2f5a6d46baf36e126209160"} Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.503050 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a177287799eb3687c8fea32f1e4027023b0ac2b5a2f5a6d46baf36e126209160" Nov 26 17:10:37 crc 
kubenswrapper[5010]: I1126 17:10:37.503107 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-w9882-config-jjsc6" Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.560325 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-w9882-config-jjsc6"] Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.576909 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-w9882-config-jjsc6"] Nov 26 17:10:37 crc kubenswrapper[5010]: I1126 17:10:37.914523 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37445cb2-a024-4623-99c8-afac9218712a" path="/var/lib/kubelet/pods/37445cb2-a024-4623-99c8-afac9218712a/volumes" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.045782 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-rsyslog-qwndp"] Nov 26 17:10:42 crc kubenswrapper[5010]: E1126 17:10:42.046497 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37445cb2-a024-4623-99c8-afac9218712a" containerName="ovn-config" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.046510 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="37445cb2-a024-4623-99c8-afac9218712a" containerName="ovn-config" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.046721 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="37445cb2-a024-4623-99c8-afac9218712a" containerName="ovn-config" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.047726 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.051701 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-config-data" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.051909 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"octavia-hmport-map" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.051943 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-rsyslog-scripts" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.057148 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-qwndp"] Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.180831 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ed61fb-8390-4ee1-a052-332b2bfdb369-config-data\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.180873 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ed61fb-8390-4ee1-a052-332b2bfdb369-scripts\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.180990 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/04ed61fb-8390-4ee1-a052-332b2bfdb369-config-data-merged\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 
17:10:42.181202 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/04ed61fb-8390-4ee1-a052-332b2bfdb369-hm-ports\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.282872 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/04ed61fb-8390-4ee1-a052-332b2bfdb369-config-data-merged\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.282963 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/04ed61fb-8390-4ee1-a052-332b2bfdb369-hm-ports\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.283020 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ed61fb-8390-4ee1-a052-332b2bfdb369-config-data\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.283039 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ed61fb-8390-4ee1-a052-332b2bfdb369-scripts\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.283514 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/04ed61fb-8390-4ee1-a052-332b2bfdb369-config-data-merged\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.284378 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/04ed61fb-8390-4ee1-a052-332b2bfdb369-hm-ports\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.289072 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/04ed61fb-8390-4ee1-a052-332b2bfdb369-config-data\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.289544 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/04ed61fb-8390-4ee1-a052-332b2bfdb369-scripts\") pod \"octavia-rsyslog-qwndp\" (UID: \"04ed61fb-8390-4ee1-a052-332b2bfdb369\") " pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.383241 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.880773 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-5955f5554b-vwt8b"] Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.883056 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.885962 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.911013 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-vwt8b"] Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.996025 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-rsyslog-qwndp"] Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.997279 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/327253af-7db3-44a6-a9d7-da7e6adccc99-amphora-image\") pod \"octavia-image-upload-5955f5554b-vwt8b\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:42 crc kubenswrapper[5010]: I1126 17:10:42.997455 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/327253af-7db3-44a6-a9d7-da7e6adccc99-httpd-config\") pod \"octavia-image-upload-5955f5554b-vwt8b\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:43 crc kubenswrapper[5010]: W1126 17:10:43.001217 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod04ed61fb_8390_4ee1_a052_332b2bfdb369.slice/crio-e140e61594d1c41dfd9ca6bbb697cb2526009ca41a859f948382ca8f20c20c2d WatchSource:0}: Error finding container e140e61594d1c41dfd9ca6bbb697cb2526009ca41a859f948382ca8f20c20c2d: Status 404 returned error can't find the container with id e140e61594d1c41dfd9ca6bbb697cb2526009ca41a859f948382ca8f20c20c2d Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 17:10:43.099652 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/327253af-7db3-44a6-a9d7-da7e6adccc99-httpd-config\") pod \"octavia-image-upload-5955f5554b-vwt8b\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 17:10:43.101203 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/327253af-7db3-44a6-a9d7-da7e6adccc99-amphora-image\") pod \"octavia-image-upload-5955f5554b-vwt8b\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 17:10:43.101759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/327253af-7db3-44a6-a9d7-da7e6adccc99-amphora-image\") pod \"octavia-image-upload-5955f5554b-vwt8b\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 
17:10:43.111218 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/327253af-7db3-44a6-a9d7-da7e6adccc99-httpd-config\") pod \"octavia-image-upload-5955f5554b-vwt8b\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 17:10:43.211740 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 17:10:43.573355 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qwndp" event={"ID":"04ed61fb-8390-4ee1-a052-332b2bfdb369","Type":"ContainerStarted","Data":"e140e61594d1c41dfd9ca6bbb697cb2526009ca41a859f948382ca8f20c20c2d"} Nov 26 17:10:43 crc kubenswrapper[5010]: I1126 17:10:43.668343 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-vwt8b"] Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.098553 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-api-86dcb45b4b-cptcz"] Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.108735 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.111144 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-public-svc" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.111408 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-octavia-internal-svc" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.142564 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-86dcb45b4b-cptcz"] Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.227953 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6169-a519-4a15-810a-b774180a35bb-config-data-merged\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.228015 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-internal-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.228169 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-config-data\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.228264 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6169-a519-4a15-810a-b774180a35bb-octavia-run\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 
17:10:44.228346 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-public-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.228759 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-scripts\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.228800 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-combined-ca-bundle\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.228817 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-ovndb-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.331194 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-scripts\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.331251 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-ovndb-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.332289 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-combined-ca-bundle\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.332366 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6169-a519-4a15-810a-b774180a35bb-config-data-merged\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.332407 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-internal-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc 
kubenswrapper[5010]: I1126 17:10:44.332435 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-config-data\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.332459 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6169-a519-4a15-810a-b774180a35bb-octavia-run\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.332488 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-public-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.333444 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6169-a519-4a15-810a-b774180a35bb-octavia-run\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.333731 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/0cdd6169-a519-4a15-810a-b774180a35bb-config-data-merged\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.337586 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-public-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.337769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-combined-ca-bundle\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.338491 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-config-data\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.339787 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-internal-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.340478 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-scripts\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.340571 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cdd6169-a519-4a15-810a-b774180a35bb-ovndb-tls-certs\") pod \"octavia-api-86dcb45b4b-cptcz\" (UID: \"0cdd6169-a519-4a15-810a-b774180a35bb\") " pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.448818 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:44 crc kubenswrapper[5010]: I1126 17:10:44.583516 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" event={"ID":"327253af-7db3-44a6-a9d7-da7e6adccc99","Type":"ContainerStarted","Data":"05a74415de902ce40c83b6234ea7fd47444aa32a6048cdc51503812b9af12bc6"} Nov 26 17:10:45 crc kubenswrapper[5010]: I1126 17:10:45.179092 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-api-86dcb45b4b-cptcz"] Nov 26 17:10:45 crc kubenswrapper[5010]: I1126 17:10:45.596229 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qwndp" event={"ID":"04ed61fb-8390-4ee1-a052-332b2bfdb369","Type":"ContainerStarted","Data":"8a3753d552912f0fa248277525b07572a5e139c08a8bf838f39c37f89616bef2"} Nov 26 17:10:45 crc kubenswrapper[5010]: I1126 17:10:45.600187 5010 generic.go:334] "Generic (PLEG): container finished" podID="0cdd6169-a519-4a15-810a-b774180a35bb" containerID="02f5757e256ea63a485a7daf9585698dd7737b6d7fedd6c1f6996344f12db67a" exitCode=0 Nov 26 17:10:45 crc kubenswrapper[5010]: I1126 17:10:45.600247 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-86dcb45b4b-cptcz" event={"ID":"0cdd6169-a519-4a15-810a-b774180a35bb","Type":"ContainerDied","Data":"02f5757e256ea63a485a7daf9585698dd7737b6d7fedd6c1f6996344f12db67a"} Nov 26 17:10:45 crc kubenswrapper[5010]: I1126 17:10:45.600280 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-86dcb45b4b-cptcz" event={"ID":"0cdd6169-a519-4a15-810a-b774180a35bb","Type":"ContainerStarted","Data":"a2661fe42cad61ffe66843c4f1306cb82cd0329d60b65a35b3a7dec767460d12"} Nov 26 17:10:46 crc kubenswrapper[5010]: I1126 17:10:46.613184 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-86dcb45b4b-cptcz" event={"ID":"0cdd6169-a519-4a15-810a-b774180a35bb","Type":"ContainerStarted","Data":"610be4f57394520b255de1644c8f0d38d5acba20e7cf3f4ece9138db334d80be"} Nov 26 17:10:46 crc kubenswrapper[5010]: I1126 17:10:46.613453 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-86dcb45b4b-cptcz" event={"ID":"0cdd6169-a519-4a15-810a-b774180a35bb","Type":"ContainerStarted","Data":"42134af7ddb2a647222436d302d2ec738f24d3dfba6e64e2592a8003914e6da6"} Nov 26 17:10:46 crc kubenswrapper[5010]: I1126 17:10:46.643723 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-api-86dcb45b4b-cptcz" podStartSLOduration=2.643679509 podStartE2EDuration="2.643679509s" podCreationTimestamp="2025-11-26 17:10:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 
17:10:46.637075284 +0000 UTC m=+6267.427792452" watchObservedRunningTime="2025-11-26 17:10:46.643679509 +0000 UTC m=+6267.434396647" Nov 26 17:10:47 crc kubenswrapper[5010]: I1126 17:10:47.626144 5010 generic.go:334] "Generic (PLEG): container finished" podID="04ed61fb-8390-4ee1-a052-332b2bfdb369" containerID="8a3753d552912f0fa248277525b07572a5e139c08a8bf838f39c37f89616bef2" exitCode=0 Nov 26 17:10:47 crc kubenswrapper[5010]: I1126 17:10:47.626278 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qwndp" event={"ID":"04ed61fb-8390-4ee1-a052-332b2bfdb369","Type":"ContainerDied","Data":"8a3753d552912f0fa248277525b07572a5e139c08a8bf838f39c37f89616bef2"} Nov 26 17:10:47 crc kubenswrapper[5010]: I1126 17:10:47.626818 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:47 crc kubenswrapper[5010]: I1126 17:10:47.626877 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:10:47 crc kubenswrapper[5010]: I1126 17:10:47.842240 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:48 crc kubenswrapper[5010]: I1126 17:10:48.251001 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.122015 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-db-sync-p77g4"] Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.124535 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.126692 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-scripts" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.133738 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-p77g4"] Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.208496 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-config-data\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.208620 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-scripts\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.208661 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-combined-ca-bundle\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.208692 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/14bbaf27-d876-4901-918d-6bc09332f656-config-data-merged\") pod \"octavia-db-sync-p77g4\" (UID: 
\"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.311184 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-config-data\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.311307 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-scripts\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.311347 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-combined-ca-bundle\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.311379 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/14bbaf27-d876-4901-918d-6bc09332f656-config-data-merged\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.311933 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/14bbaf27-d876-4901-918d-6bc09332f656-config-data-merged\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.317063 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-combined-ca-bundle\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.317132 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-scripts\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.321627 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-config-data\") pod \"octavia-db-sync-p77g4\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:52 crc kubenswrapper[5010]: I1126 17:10:52.448821 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-p77g4" Nov 26 17:10:53 crc kubenswrapper[5010]: I1126 17:10:53.045769 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-db-sync-p77g4"] Nov 26 17:10:56 crc kubenswrapper[5010]: I1126 17:10:56.722521 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-p77g4" event={"ID":"14bbaf27-d876-4901-918d-6bc09332f656","Type":"ContainerStarted","Data":"229ee03693be1e7f74fcc1f427f16713b7934a24fc53681b2c7cb7d5d0afc01c"} Nov 26 17:10:57 crc kubenswrapper[5010]: I1126 17:10:57.734691 5010 generic.go:334] "Generic (PLEG): container finished" podID="14bbaf27-d876-4901-918d-6bc09332f656" containerID="00095defb9449b856e1df11c82884911a6f874f250f7846e5f3d0ff508a95a3b" exitCode=0 Nov 26 17:10:57 crc kubenswrapper[5010]: I1126 17:10:57.734814 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-p77g4" event={"ID":"14bbaf27-d876-4901-918d-6bc09332f656","Type":"ContainerDied","Data":"00095defb9449b856e1df11c82884911a6f874f250f7846e5f3d0ff508a95a3b"} Nov 26 17:10:57 crc kubenswrapper[5010]: I1126 17:10:57.741855 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" event={"ID":"327253af-7db3-44a6-a9d7-da7e6adccc99","Type":"ContainerStarted","Data":"1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c"} Nov 26 17:10:57 crc kubenswrapper[5010]: I1126 17:10:57.746635 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-rsyslog-qwndp" event={"ID":"04ed61fb-8390-4ee1-a052-332b2bfdb369","Type":"ContainerStarted","Data":"15b31b00240433b14de41aec8642c9c5e72d802877ba945b15d0878be8c685ed"} Nov 26 17:10:57 crc kubenswrapper[5010]: I1126 17:10:57.746951 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:10:57 crc kubenswrapper[5010]: I1126 17:10:57.816299 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-rsyslog-qwndp" podStartSLOduration=1.801596553 podStartE2EDuration="15.816275707s" podCreationTimestamp="2025-11-26 17:10:42 +0000 UTC" firstStartedPulling="2025-11-26 17:10:43.003333154 +0000 UTC m=+6263.794050302" lastFinishedPulling="2025-11-26 17:10:57.018012308 +0000 UTC m=+6277.808729456" observedRunningTime="2025-11-26 17:10:57.799753646 +0000 UTC m=+6278.590470844" watchObservedRunningTime="2025-11-26 17:10:57.816275707 +0000 UTC m=+6278.606992855" Nov 26 17:10:58 crc kubenswrapper[5010]: I1126 17:10:58.762225 5010 generic.go:334] "Generic (PLEG): container finished" podID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerID="1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c" exitCode=0 Nov 26 17:10:58 crc kubenswrapper[5010]: I1126 17:10:58.762313 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" event={"ID":"327253af-7db3-44a6-a9d7-da7e6adccc99","Type":"ContainerDied","Data":"1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c"} Nov 26 17:10:58 crc kubenswrapper[5010]: I1126 17:10:58.768923 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-p77g4" event={"ID":"14bbaf27-d876-4901-918d-6bc09332f656","Type":"ContainerStarted","Data":"ae58dd5de0cda88047c2ee7de60b8b82dcff86584a102cde62fe688f4226c904"} Nov 26 17:10:58 crc kubenswrapper[5010]: I1126 17:10:58.822771 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/octavia-db-sync-p77g4" podStartSLOduration=6.822748412 podStartE2EDuration="6.822748412s" podCreationTimestamp="2025-11-26 17:10:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:10:58.811438621 +0000 UTC m=+6279.602155759" watchObservedRunningTime="2025-11-26 17:10:58.822748412 +0000 UTC m=+6279.613465590" Nov 26 17:10:59 crc kubenswrapper[5010]: I1126 17:10:59.781487 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" event={"ID":"327253af-7db3-44a6-a9d7-da7e6adccc99","Type":"ContainerStarted","Data":"a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c"} Nov 26 17:10:59 crc kubenswrapper[5010]: I1126 17:10:59.811649 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" podStartSLOduration=4.405927788 podStartE2EDuration="17.81162872s" podCreationTimestamp="2025-11-26 17:10:42 +0000 UTC" firstStartedPulling="2025-11-26 17:10:43.664994926 +0000 UTC m=+6264.455712074" lastFinishedPulling="2025-11-26 17:10:57.070695858 +0000 UTC m=+6277.861413006" observedRunningTime="2025-11-26 17:10:59.797254523 +0000 UTC m=+6280.587971681" watchObservedRunningTime="2025-11-26 17:10:59.81162872 +0000 UTC m=+6280.602345868" Nov 26 17:11:03 crc kubenswrapper[5010]: I1126 17:11:03.629963 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:11:03 crc kubenswrapper[5010]: I1126 17:11:03.725895 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-api-86dcb45b4b-cptcz" Nov 26 17:11:03 crc kubenswrapper[5010]: I1126 17:11:03.808733 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-67c9d644dd-m7cx4"] Nov 26 17:11:03 crc kubenswrapper[5010]: I1126 17:11:03.809442 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-67c9d644dd-m7cx4" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api" containerID="cri-o://425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282" gracePeriod=30 Nov 26 17:11:03 crc kubenswrapper[5010]: I1126 17:11:03.810127 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-api-67c9d644dd-m7cx4" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api-provider-agent" containerID="cri-o://67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369" gracePeriod=30 Nov 26 17:11:05 crc kubenswrapper[5010]: I1126 17:11:05.860875 5010 generic.go:334] "Generic (PLEG): container finished" podID="215d6716-e893-4fb1-846c-4538c12c888a" containerID="67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369" exitCode=0 Nov 26 17:11:05 crc kubenswrapper[5010]: I1126 17:11:05.861087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerDied","Data":"67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369"} Nov 26 17:11:05 crc kubenswrapper[5010]: I1126 17:11:05.863304 5010 generic.go:334] "Generic (PLEG): container finished" podID="14bbaf27-d876-4901-918d-6bc09332f656" containerID="ae58dd5de0cda88047c2ee7de60b8b82dcff86584a102cde62fe688f4226c904" exitCode=0 Nov 26 17:11:05 crc kubenswrapper[5010]: I1126 17:11:05.863341 5010 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-p77g4" event={"ID":"14bbaf27-d876-4901-918d-6bc09332f656","Type":"ContainerDied","Data":"ae58dd5de0cda88047c2ee7de60b8b82dcff86584a102cde62fe688f4226c904"} Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.299535 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-db-sync-p77g4" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.400113 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-scripts\") pod \"14bbaf27-d876-4901-918d-6bc09332f656\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.400231 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-config-data\") pod \"14bbaf27-d876-4901-918d-6bc09332f656\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.400390 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/14bbaf27-d876-4901-918d-6bc09332f656-config-data-merged\") pod \"14bbaf27-d876-4901-918d-6bc09332f656\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.400451 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-combined-ca-bundle\") pod \"14bbaf27-d876-4901-918d-6bc09332f656\" (UID: \"14bbaf27-d876-4901-918d-6bc09332f656\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.406826 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-config-data" (OuterVolumeSpecName: "config-data") pod "14bbaf27-d876-4901-918d-6bc09332f656" (UID: "14bbaf27-d876-4901-918d-6bc09332f656"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.407404 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-scripts" (OuterVolumeSpecName: "scripts") pod "14bbaf27-d876-4901-918d-6bc09332f656" (UID: "14bbaf27-d876-4901-918d-6bc09332f656"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.434937 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14bbaf27-d876-4901-918d-6bc09332f656-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "14bbaf27-d876-4901-918d-6bc09332f656" (UID: "14bbaf27-d876-4901-918d-6bc09332f656"). InnerVolumeSpecName "config-data-merged". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.443480 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "14bbaf27-d876-4901-918d-6bc09332f656" (UID: "14bbaf27-d876-4901-918d-6bc09332f656"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.496817 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.503099 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/14bbaf27-d876-4901-918d-6bc09332f656-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.503161 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.503176 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.503189 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/14bbaf27-d876-4901-918d-6bc09332f656-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.604785 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-config-data\") pod \"215d6716-e893-4fb1-846c-4538c12c888a\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.604988 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-scripts\") pod \"215d6716-e893-4fb1-846c-4538c12c888a\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.605037 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-config-data-merged\") pod \"215d6716-e893-4fb1-846c-4538c12c888a\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.605166 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-combined-ca-bundle\") pod \"215d6716-e893-4fb1-846c-4538c12c888a\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.605312 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-octavia-run\") pod \"215d6716-e893-4fb1-846c-4538c12c888a\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.605354 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-ovndb-tls-certs\") pod \"215d6716-e893-4fb1-846c-4538c12c888a\" (UID: \"215d6716-e893-4fb1-846c-4538c12c888a\") " Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.607412 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-octavia-run" (OuterVolumeSpecName: "octavia-run") pod "215d6716-e893-4fb1-846c-4538c12c888a" (UID: "215d6716-e893-4fb1-846c-4538c12c888a"). InnerVolumeSpecName "octavia-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.607786 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-config-data" (OuterVolumeSpecName: "config-data") pod "215d6716-e893-4fb1-846c-4538c12c888a" (UID: "215d6716-e893-4fb1-846c-4538c12c888a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.609496 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-scripts" (OuterVolumeSpecName: "scripts") pod "215d6716-e893-4fb1-846c-4538c12c888a" (UID: "215d6716-e893-4fb1-846c-4538c12c888a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.666501 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "215d6716-e893-4fb1-846c-4538c12c888a" (UID: "215d6716-e893-4fb1-846c-4538c12c888a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.667194 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-config-data-merged" (OuterVolumeSpecName: "config-data-merged") pod "215d6716-e893-4fb1-846c-4538c12c888a" (UID: "215d6716-e893-4fb1-846c-4538c12c888a"). InnerVolumeSpecName "config-data-merged". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.707493 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.707534 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-config-data-merged\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.707547 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.707558 5010 reconciler_common.go:293] "Volume detached for volume \"octavia-run\" (UniqueName: \"kubernetes.io/empty-dir/215d6716-e893-4fb1-846c-4538c12c888a-octavia-run\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.707570 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.775020 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "215d6716-e893-4fb1-846c-4538c12c888a" (UID: "215d6716-e893-4fb1-846c-4538c12c888a"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.809324 5010 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/215d6716-e893-4fb1-846c-4538c12c888a-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.889250 5010 generic.go:334] "Generic (PLEG): container finished" podID="215d6716-e893-4fb1-846c-4538c12c888a" containerID="425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282" exitCode=0 Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.889331 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-api-67c9d644dd-m7cx4" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.890044 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerDied","Data":"425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282"} Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.890354 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-api-67c9d644dd-m7cx4" event={"ID":"215d6716-e893-4fb1-846c-4538c12c888a","Type":"ContainerDied","Data":"163aa2241ac7b0feb0b83380deb6e012c3bb643a31aa9b8b30d1c061309b9eaf"} Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.890434 5010 scope.go:117] "RemoveContainer" containerID="67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.894071 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-db-sync-p77g4" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.923753 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-db-sync-p77g4" event={"ID":"14bbaf27-d876-4901-918d-6bc09332f656","Type":"ContainerDied","Data":"229ee03693be1e7f74fcc1f427f16713b7934a24fc53681b2c7cb7d5d0afc01c"} Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.924112 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="229ee03693be1e7f74fcc1f427f16713b7934a24fc53681b2c7cb7d5d0afc01c" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.939493 5010 scope.go:117] "RemoveContainer" containerID="425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.954679 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-api-67c9d644dd-m7cx4"] Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.964209 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-api-67c9d644dd-m7cx4"] Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.966196 5010 scope.go:117] "RemoveContainer" containerID="a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.997883 5010 scope.go:117] "RemoveContainer" containerID="67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369" Nov 26 17:11:07 crc kubenswrapper[5010]: E1126 17:11:07.998478 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369\": container with ID starting with 67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369 not found: ID does not exist" containerID="67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.998509 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369"} err="failed to get container status \"67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369\": rpc error: code = NotFound desc = could not find container \"67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369\": container with ID starting with 67fa7bceb18a8a4192373795a89eedb0e4fbe25019e49342c23412928f7cd369 not found: ID does not exist" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.998533 5010 scope.go:117] "RemoveContainer" containerID="425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282" Nov 26 17:11:07 crc kubenswrapper[5010]: E1126 17:11:07.998950 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282\": container with ID starting with 425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282 not found: ID does not exist" containerID="425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.998980 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282"} err="failed to get container status \"425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282\": rpc error: code = NotFound desc = could not find container 
\"425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282\": container with ID starting with 425d5ef454df664ad5c26a9828647f7c13a5a7b54defde53b73e25ac933bd282 not found: ID does not exist" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.999000 5010 scope.go:117] "RemoveContainer" containerID="a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89" Nov 26 17:11:07 crc kubenswrapper[5010]: E1126 17:11:07.999265 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89\": container with ID starting with a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89 not found: ID does not exist" containerID="a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89" Nov 26 17:11:07 crc kubenswrapper[5010]: I1126 17:11:07.999294 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89"} err="failed to get container status \"a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89\": rpc error: code = NotFound desc = could not find container \"a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89\": container with ID starting with a186fc4f9e98d333248819a6ae0e1fa603a1cd051bdfac771b3f98f0f7d18c89 not found: ID does not exist" Nov 26 17:11:09 crc kubenswrapper[5010]: I1126 17:11:09.906117 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="215d6716-e893-4fb1-846c-4538c12c888a" path="/var/lib/kubelet/pods/215d6716-e893-4fb1-846c-4538c12c888a/volumes" Nov 26 17:11:11 crc kubenswrapper[5010]: I1126 17:11:11.423381 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:11:11 crc kubenswrapper[5010]: I1126 17:11:11.423448 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:11:12 crc kubenswrapper[5010]: I1126 17:11:12.416027 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-rsyslog-qwndp" Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.118147 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-vwt8b"] Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.118876 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerName="octavia-amphora-httpd" containerID="cri-o://a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c" gracePeriod=30 Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.714850 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.877682 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/327253af-7db3-44a6-a9d7-da7e6adccc99-amphora-image\") pod \"327253af-7db3-44a6-a9d7-da7e6adccc99\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.877980 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/327253af-7db3-44a6-a9d7-da7e6adccc99-httpd-config\") pod \"327253af-7db3-44a6-a9d7-da7e6adccc99\" (UID: \"327253af-7db3-44a6-a9d7-da7e6adccc99\") " Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.910929 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/327253af-7db3-44a6-a9d7-da7e6adccc99-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "327253af-7db3-44a6-a9d7-da7e6adccc99" (UID: "327253af-7db3-44a6-a9d7-da7e6adccc99"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.960608 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/327253af-7db3-44a6-a9d7-da7e6adccc99-amphora-image" (OuterVolumeSpecName: "amphora-image") pod "327253af-7db3-44a6-a9d7-da7e6adccc99" (UID: "327253af-7db3-44a6-a9d7-da7e6adccc99"). InnerVolumeSpecName "amphora-image". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.980151 5010 reconciler_common.go:293] "Volume detached for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/327253af-7db3-44a6-a9d7-da7e6adccc99-amphora-image\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:31 crc kubenswrapper[5010]: I1126 17:11:31.980187 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/327253af-7db3-44a6-a9d7-da7e6adccc99-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.259024 5010 generic.go:334] "Generic (PLEG): container finished" podID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerID="a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c" exitCode=0 Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.259074 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" event={"ID":"327253af-7db3-44a6-a9d7-da7e6adccc99","Type":"ContainerDied","Data":"a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c"} Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.259106 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" event={"ID":"327253af-7db3-44a6-a9d7-da7e6adccc99","Type":"ContainerDied","Data":"05a74415de902ce40c83b6234ea7fd47444aa32a6048cdc51503812b9af12bc6"} Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.259126 5010 scope.go:117] "RemoveContainer" containerID="a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.259315 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-vwt8b" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.373422 5010 scope.go:117] "RemoveContainer" containerID="1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.377077 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-vwt8b"] Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.386450 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-vwt8b"] Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.433069 5010 scope.go:117] "RemoveContainer" containerID="a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c" Nov 26 17:11:32 crc kubenswrapper[5010]: E1126 17:11:32.436958 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c\": container with ID starting with a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c not found: ID does not exist" containerID="a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.437039 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c"} err="failed to get container status \"a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c\": rpc error: code = NotFound desc = could not find container \"a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c\": container with ID starting with a56cd1d417f0b8ae1125449edaef50609b43904fb86b6d8408a2eab14876f21c not found: ID does not exist" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.437091 5010 scope.go:117] "RemoveContainer" containerID="1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c" Nov 26 17:11:32 crc kubenswrapper[5010]: E1126 17:11:32.439557 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c\": container with ID starting with 1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c not found: ID does not exist" containerID="1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c" Nov 26 17:11:32 crc kubenswrapper[5010]: I1126 17:11:32.439619 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c"} err="failed to get container status \"1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c\": rpc error: code = NotFound desc = could not find container \"1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c\": container with ID starting with 1d6e48af57f737ac8cf896f7cb80214beeb0d25b135e3db2d3a88d6aafba1d2c not found: ID does not exist" Nov 26 17:11:33 crc kubenswrapper[5010]: I1126 17:11:33.907061 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" path="/var/lib/kubelet/pods/327253af-7db3-44a6-a9d7-da7e6adccc99/volumes" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.853677 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-image-upload-5955f5554b-22m5x"] Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 
17:11:36.854735 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerName="octavia-amphora-httpd" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854752 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerName="octavia-amphora-httpd" Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 17:11:36.854769 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854776 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api" Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 17:11:36.854790 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="init" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854798 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="init" Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 17:11:36.854831 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14bbaf27-d876-4901-918d-6bc09332f656" containerName="octavia-db-sync" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854838 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="14bbaf27-d876-4901-918d-6bc09332f656" containerName="octavia-db-sync" Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 17:11:36.854853 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerName="init" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854860 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerName="init" Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 17:11:36.854870 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api-provider-agent" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854879 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api-provider-agent" Nov 26 17:11:36 crc kubenswrapper[5010]: E1126 17:11:36.854893 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14bbaf27-d876-4901-918d-6bc09332f656" containerName="init" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.854899 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="14bbaf27-d876-4901-918d-6bc09332f656" containerName="init" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.855118 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.855152 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="215d6716-e893-4fb1-846c-4538c12c888a" containerName="octavia-api-provider-agent" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.855169 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="327253af-7db3-44a6-a9d7-da7e6adccc99" containerName="octavia-amphora-httpd" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.855182 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="14bbaf27-d876-4901-918d-6bc09332f656" containerName="octavia-db-sync" Nov 26 17:11:36 crc 
kubenswrapper[5010]: I1126 17:11:36.856482 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.860462 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-config-data" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.871650 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-22m5x"] Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.907213 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/c2804229-e969-49e6-806a-d132e8338b87-amphora-image\") pod \"octavia-image-upload-5955f5554b-22m5x\" (UID: \"c2804229-e969-49e6-806a-d132e8338b87\") " pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:36 crc kubenswrapper[5010]: I1126 17:11:36.907574 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c2804229-e969-49e6-806a-d132e8338b87-httpd-config\") pod \"octavia-image-upload-5955f5554b-22m5x\" (UID: \"c2804229-e969-49e6-806a-d132e8338b87\") " pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:37 crc kubenswrapper[5010]: I1126 17:11:37.009615 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/c2804229-e969-49e6-806a-d132e8338b87-amphora-image\") pod \"octavia-image-upload-5955f5554b-22m5x\" (UID: \"c2804229-e969-49e6-806a-d132e8338b87\") " pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:37 crc kubenswrapper[5010]: I1126 17:11:37.009690 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c2804229-e969-49e6-806a-d132e8338b87-httpd-config\") pod \"octavia-image-upload-5955f5554b-22m5x\" (UID: \"c2804229-e969-49e6-806a-d132e8338b87\") " pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:37 crc kubenswrapper[5010]: I1126 17:11:37.010237 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-image\" (UniqueName: \"kubernetes.io/empty-dir/c2804229-e969-49e6-806a-d132e8338b87-amphora-image\") pod \"octavia-image-upload-5955f5554b-22m5x\" (UID: \"c2804229-e969-49e6-806a-d132e8338b87\") " pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:37 crc kubenswrapper[5010]: I1126 17:11:37.016459 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/c2804229-e969-49e6-806a-d132e8338b87-httpd-config\") pod \"octavia-image-upload-5955f5554b-22m5x\" (UID: \"c2804229-e969-49e6-806a-d132e8338b87\") " pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:37 crc kubenswrapper[5010]: I1126 17:11:37.188306 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-image-upload-5955f5554b-22m5x" Nov 26 17:11:37 crc kubenswrapper[5010]: I1126 17:11:37.668589 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-image-upload-5955f5554b-22m5x"] Nov 26 17:11:38 crc kubenswrapper[5010]: I1126 17:11:38.320343 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-22m5x" event={"ID":"c2804229-e969-49e6-806a-d132e8338b87","Type":"ContainerStarted","Data":"0625e43abd6d3f9bb198f319b7b70aeb06f43e5b12c246761b190be4141eb9a3"} Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.335669 5010 generic.go:334] "Generic (PLEG): container finished" podID="c2804229-e969-49e6-806a-d132e8338b87" containerID="4eb95fa36f3eb4840d02c7f1b0f2407ce2a60406729cf8a338b04e3e1455cdc2" exitCode=0 Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.335886 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-22m5x" event={"ID":"c2804229-e969-49e6-806a-d132e8338b87","Type":"ContainerDied","Data":"4eb95fa36f3eb4840d02c7f1b0f2407ce2a60406729cf8a338b04e3e1455cdc2"} Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.508243 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-sjfsz"] Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.513157 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.528232 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjfsz"] Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.559248 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-utilities\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.559418 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdzwl\" (UniqueName: \"kubernetes.io/projected/0bd0a59c-254c-4822-8671-17192366524c-kube-api-access-pdzwl\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.559447 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-catalog-content\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.661155 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdzwl\" (UniqueName: \"kubernetes.io/projected/0bd0a59c-254c-4822-8671-17192366524c-kube-api-access-pdzwl\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.661419 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-catalog-content\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.661491 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-utilities\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.661941 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-catalog-content\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.661980 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-utilities\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.677140 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdzwl\" (UniqueName: \"kubernetes.io/projected/0bd0a59c-254c-4822-8671-17192366524c-kube-api-access-pdzwl\") pod \"redhat-marketplace-sjfsz\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:39 crc kubenswrapper[5010]: I1126 17:11:39.840814 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:40 crc kubenswrapper[5010]: I1126 17:11:40.377702 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-image-upload-5955f5554b-22m5x" event={"ID":"c2804229-e969-49e6-806a-d132e8338b87","Type":"ContainerStarted","Data":"ac996b9f5bf6f32b8d51479dedb37012a242fcc9f6220116357596252c13d6ea"} Nov 26 17:11:40 crc kubenswrapper[5010]: I1126 17:11:40.408277 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-image-upload-5955f5554b-22m5x" podStartSLOduration=3.967157511 podStartE2EDuration="4.408254779s" podCreationTimestamp="2025-11-26 17:11:36 +0000 UTC" firstStartedPulling="2025-11-26 17:11:37.681855029 +0000 UTC m=+6318.472572177" lastFinishedPulling="2025-11-26 17:11:38.122952257 +0000 UTC m=+6318.913669445" observedRunningTime="2025-11-26 17:11:40.399168143 +0000 UTC m=+6321.189885291" watchObservedRunningTime="2025-11-26 17:11:40.408254779 +0000 UTC m=+6321.198971927" Nov 26 17:11:40 crc kubenswrapper[5010]: I1126 17:11:40.465136 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjfsz"] Nov 26 17:11:41 crc kubenswrapper[5010]: I1126 17:11:41.387486 5010 generic.go:334] "Generic (PLEG): container finished" podID="0bd0a59c-254c-4822-8671-17192366524c" containerID="abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596" exitCode=0 Nov 26 17:11:41 crc kubenswrapper[5010]: I1126 17:11:41.387575 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjfsz" event={"ID":"0bd0a59c-254c-4822-8671-17192366524c","Type":"ContainerDied","Data":"abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596"} Nov 26 17:11:41 crc kubenswrapper[5010]: I1126 17:11:41.388410 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjfsz" event={"ID":"0bd0a59c-254c-4822-8671-17192366524c","Type":"ContainerStarted","Data":"5010caf8532eaa5253e9fef1334791744145e99520c3c2b151eadd88079fbb1f"} Nov 26 17:11:41 crc kubenswrapper[5010]: I1126 17:11:41.422862 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:11:41 crc kubenswrapper[5010]: I1126 17:11:41.422990 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:11:43 crc kubenswrapper[5010]: I1126 17:11:43.421375 5010 generic.go:334] "Generic (PLEG): container finished" podID="0bd0a59c-254c-4822-8671-17192366524c" containerID="d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784" exitCode=0 Nov 26 17:11:43 crc kubenswrapper[5010]: I1126 17:11:43.421538 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjfsz" event={"ID":"0bd0a59c-254c-4822-8671-17192366524c","Type":"ContainerDied","Data":"d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784"} Nov 26 17:11:44 crc kubenswrapper[5010]: I1126 17:11:44.434794 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-sjfsz" event={"ID":"0bd0a59c-254c-4822-8671-17192366524c","Type":"ContainerStarted","Data":"a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b"} Nov 26 17:11:44 crc kubenswrapper[5010]: I1126 17:11:44.455466 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-sjfsz" podStartSLOduration=2.839766154 podStartE2EDuration="5.45543662s" podCreationTimestamp="2025-11-26 17:11:39 +0000 UTC" firstStartedPulling="2025-11-26 17:11:41.389505568 +0000 UTC m=+6322.180222716" lastFinishedPulling="2025-11-26 17:11:44.005176034 +0000 UTC m=+6324.795893182" observedRunningTime="2025-11-26 17:11:44.452515107 +0000 UTC m=+6325.243232265" watchObservedRunningTime="2025-11-26 17:11:44.45543662 +0000 UTC m=+6325.246153808" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.391349 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-healthmanager-rs97n"] Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.394658 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.396855 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-scripts" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.397520 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-healthmanager-config-data" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.398562 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-certs-secret" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.416613 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-rs97n"] Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.553602 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-combined-ca-bundle\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.554183 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d7332edc-62f6-4f6f-a6b5-8024a073631e-config-data-merged\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.554405 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-config-data\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.554587 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-amphora-certs\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.555002 5010 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/d7332edc-62f6-4f6f-a6b5-8024a073631e-hm-ports\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.556658 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-scripts\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.658673 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/d7332edc-62f6-4f6f-a6b5-8024a073631e-hm-ports\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.658750 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-scripts\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.658807 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-combined-ca-bundle\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.658822 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d7332edc-62f6-4f6f-a6b5-8024a073631e-config-data-merged\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.658862 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-config-data\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.658885 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-amphora-certs\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.660773 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/d7332edc-62f6-4f6f-a6b5-8024a073631e-config-data-merged\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.661524 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" 
(UniqueName: \"kubernetes.io/configmap/d7332edc-62f6-4f6f-a6b5-8024a073631e-hm-ports\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.665673 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-amphora-certs\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.666863 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-scripts\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.667512 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-config-data\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.675393 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7332edc-62f6-4f6f-a6b5-8024a073631e-combined-ca-bundle\") pod \"octavia-healthmanager-rs97n\" (UID: \"d7332edc-62f6-4f6f-a6b5-8024a073631e\") " pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:48 crc kubenswrapper[5010]: I1126 17:11:48.716449 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:49 crc kubenswrapper[5010]: I1126 17:11:49.474284 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-healthmanager-rs97n"] Nov 26 17:11:49 crc kubenswrapper[5010]: I1126 17:11:49.518809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-rs97n" event={"ID":"d7332edc-62f6-4f6f-a6b5-8024a073631e","Type":"ContainerStarted","Data":"9e6d97541ebfa66e76a257070dd27a4ed1a917d3aacbe894286d8fdcc211f420"} Nov 26 17:11:49 crc kubenswrapper[5010]: I1126 17:11:49.840978 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:49 crc kubenswrapper[5010]: I1126 17:11:49.842181 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:49 crc kubenswrapper[5010]: I1126 17:11:49.928439 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.440690 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-housekeeping-hr695"] Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.442946 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.445047 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-scripts" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.449476 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-housekeeping-config-data" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.467845 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-hr695"] Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.529045 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-rs97n" event={"ID":"d7332edc-62f6-4f6f-a6b5-8024a073631e","Type":"ContainerStarted","Data":"4bdba97308ed37fc49d98410110b7e47d4de36235fa3ae2f9b92b8f24e8d3595"} Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.586587 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.637539 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-config-data\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.637592 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/4d881ca5-eedc-4457-a85e-252ebb895dc3-hm-ports\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.637750 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-amphora-certs\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.637785 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4d881ca5-eedc-4457-a85e-252ebb895dc3-config-data-merged\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.637815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-scripts\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.637832 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-combined-ca-bundle\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.645434 5010 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjfsz"] Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.740083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-amphora-certs\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.740370 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4d881ca5-eedc-4457-a85e-252ebb895dc3-config-data-merged\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.740518 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-scripts\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.740619 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-combined-ca-bundle\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.740766 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-config-data\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.740850 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/4d881ca5-eedc-4457-a85e-252ebb895dc3-hm-ports\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.741663 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/4d881ca5-eedc-4457-a85e-252ebb895dc3-config-data-merged\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.742127 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/4d881ca5-eedc-4457-a85e-252ebb895dc3-hm-ports\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.747049 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-combined-ca-bundle\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc 
kubenswrapper[5010]: I1126 17:11:50.747922 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-config-data\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.750808 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-scripts\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.758074 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/4d881ca5-eedc-4457-a85e-252ebb895dc3-amphora-certs\") pod \"octavia-housekeeping-hr695\" (UID: \"4d881ca5-eedc-4457-a85e-252ebb895dc3\") " pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:50 crc kubenswrapper[5010]: I1126 17:11:50.764548 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.310008 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-housekeeping-hr695"] Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.542397 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-hr695" event={"ID":"4d881ca5-eedc-4457-a85e-252ebb895dc3","Type":"ContainerStarted","Data":"984eb9cfaa5a8bbd571ba6dc8384d6fd861dfa1fff87ac113e8dd1b52d3be8dd"} Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.597858 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/octavia-worker-92kdl"] Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.600158 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.602356 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-scripts" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.602589 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"octavia-worker-config-data" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.626464 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-92kdl"] Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.764763 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-config-data\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.764834 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-amphora-certs\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.765147 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-config-data-merged\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.765229 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-combined-ca-bundle\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.765323 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-scripts\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.765547 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-hm-ports\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.867102 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-hm-ports\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.867162 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-config-data\") pod \"octavia-worker-92kdl\" (UID: 
\"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.867222 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-amphora-certs\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.867314 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-config-data-merged\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.867348 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-combined-ca-bundle\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.867391 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-scripts\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.868888 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-merged\" (UniqueName: \"kubernetes.io/empty-dir/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-config-data-merged\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.869800 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hm-ports\" (UniqueName: \"kubernetes.io/configmap/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-hm-ports\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.873789 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-config-data\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.883407 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-combined-ca-bundle\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.883426 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"amphora-certs\" (UniqueName: \"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-amphora-certs\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.883881 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/dc7aa8a9-668c-485c-ad2b-6ba848d528b7-scripts\") pod \"octavia-worker-92kdl\" (UID: \"dc7aa8a9-668c-485c-ad2b-6ba848d528b7\") " pod="openstack/octavia-worker-92kdl" Nov 26 17:11:51 crc kubenswrapper[5010]: I1126 17:11:51.962014 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/octavia-worker-92kdl" Nov 26 17:11:52 crc kubenswrapper[5010]: I1126 17:11:52.503318 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/octavia-worker-92kdl"] Nov 26 17:11:52 crc kubenswrapper[5010]: I1126 17:11:52.554278 5010 generic.go:334] "Generic (PLEG): container finished" podID="d7332edc-62f6-4f6f-a6b5-8024a073631e" containerID="4bdba97308ed37fc49d98410110b7e47d4de36235fa3ae2f9b92b8f24e8d3595" exitCode=0 Nov 26 17:11:52 crc kubenswrapper[5010]: I1126 17:11:52.554366 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-rs97n" event={"ID":"d7332edc-62f6-4f6f-a6b5-8024a073631e","Type":"ContainerDied","Data":"4bdba97308ed37fc49d98410110b7e47d4de36235fa3ae2f9b92b8f24e8d3595"} Nov 26 17:11:52 crc kubenswrapper[5010]: I1126 17:11:52.554494 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-sjfsz" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="registry-server" containerID="cri-o://a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b" gracePeriod=2 Nov 26 17:11:52 crc kubenswrapper[5010]: W1126 17:11:52.780385 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc7aa8a9_668c_485c_ad2b_6ba848d528b7.slice/crio-69c72a46ce7bb107a9971fe484e50540f82913d5723e791826c108e64f795995 WatchSource:0}: Error finding container 69c72a46ce7bb107a9971fe484e50540f82913d5723e791826c108e64f795995: Status 404 returned error can't find the container with id 69c72a46ce7bb107a9971fe484e50540f82913d5723e791826c108e64f795995 Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.225667 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.399375 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-catalog-content\") pod \"0bd0a59c-254c-4822-8671-17192366524c\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.399440 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdzwl\" (UniqueName: \"kubernetes.io/projected/0bd0a59c-254c-4822-8671-17192366524c-kube-api-access-pdzwl\") pod \"0bd0a59c-254c-4822-8671-17192366524c\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.399520 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-utilities\") pod \"0bd0a59c-254c-4822-8671-17192366524c\" (UID: \"0bd0a59c-254c-4822-8671-17192366524c\") " Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.400804 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-utilities" (OuterVolumeSpecName: "utilities") pod "0bd0a59c-254c-4822-8671-17192366524c" (UID: "0bd0a59c-254c-4822-8671-17192366524c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.407228 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bd0a59c-254c-4822-8671-17192366524c-kube-api-access-pdzwl" (OuterVolumeSpecName: "kube-api-access-pdzwl") pod "0bd0a59c-254c-4822-8671-17192366524c" (UID: "0bd0a59c-254c-4822-8671-17192366524c"). InnerVolumeSpecName "kube-api-access-pdzwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.419290 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0bd0a59c-254c-4822-8671-17192366524c" (UID: "0bd0a59c-254c-4822-8671-17192366524c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.501500 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.501536 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdzwl\" (UniqueName: \"kubernetes.io/projected/0bd0a59c-254c-4822-8671-17192366524c-kube-api-access-pdzwl\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.501550 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0bd0a59c-254c-4822-8671-17192366524c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.564198 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-hr695" event={"ID":"4d881ca5-eedc-4457-a85e-252ebb895dc3","Type":"ContainerStarted","Data":"0f1feca0d3c4463923ffccb62a6eb23c1ab64a97d95993e97a4022ba7224924d"} Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.566649 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-healthmanager-rs97n" event={"ID":"d7332edc-62f6-4f6f-a6b5-8024a073631e","Type":"ContainerStarted","Data":"f64d449281a2b2e7a49329cb0da94495cfd017004064284bfa2a9d15803dc122"} Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.566925 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.568257 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-92kdl" event={"ID":"dc7aa8a9-668c-485c-ad2b-6ba848d528b7","Type":"ContainerStarted","Data":"69c72a46ce7bb107a9971fe484e50540f82913d5723e791826c108e64f795995"} Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.572201 5010 generic.go:334] "Generic (PLEG): container finished" podID="0bd0a59c-254c-4822-8671-17192366524c" containerID="a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b" exitCode=0 Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.572271 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjfsz" event={"ID":"0bd0a59c-254c-4822-8671-17192366524c","Type":"ContainerDied","Data":"a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b"} Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.572309 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-sjfsz" event={"ID":"0bd0a59c-254c-4822-8671-17192366524c","Type":"ContainerDied","Data":"5010caf8532eaa5253e9fef1334791744145e99520c3c2b151eadd88079fbb1f"} Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.572332 5010 scope.go:117] "RemoveContainer" containerID="a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.572272 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-sjfsz" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.607373 5010 scope.go:117] "RemoveContainer" containerID="d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.624496 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-healthmanager-rs97n" podStartSLOduration=5.624467983 podStartE2EDuration="5.624467983s" podCreationTimestamp="2025-11-26 17:11:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:11:53.603616815 +0000 UTC m=+6334.394333953" watchObservedRunningTime="2025-11-26 17:11:53.624467983 +0000 UTC m=+6334.415185151" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.637147 5010 scope.go:117] "RemoveContainer" containerID="abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.637426 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjfsz"] Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.649181 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-sjfsz"] Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.664915 5010 scope.go:117] "RemoveContainer" containerID="a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b" Nov 26 17:11:53 crc kubenswrapper[5010]: E1126 17:11:53.667959 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b\": container with ID starting with a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b not found: ID does not exist" containerID="a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.668004 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b"} err="failed to get container status \"a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b\": rpc error: code = NotFound desc = could not find container \"a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b\": container with ID starting with a1088b2faecda856ad4245aa0987ffe094e6326ddf6acf67948bdeeda4deff0b not found: ID does not exist" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.668291 5010 scope.go:117] "RemoveContainer" containerID="d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784" Nov 26 17:11:53 crc kubenswrapper[5010]: E1126 17:11:53.668778 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784\": container with ID starting with d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784 not found: ID does not exist" containerID="d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.668814 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784"} err="failed to get container status 
\"d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784\": rpc error: code = NotFound desc = could not find container \"d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784\": container with ID starting with d7e3acee6c202109f1f3366da48ce0c5b8730790b281ff13fddbbb5f3e2c7784 not found: ID does not exist" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.668843 5010 scope.go:117] "RemoveContainer" containerID="abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596" Nov 26 17:11:53 crc kubenswrapper[5010]: E1126 17:11:53.669225 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596\": container with ID starting with abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596 not found: ID does not exist" containerID="abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.669260 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596"} err="failed to get container status \"abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596\": rpc error: code = NotFound desc = could not find container \"abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596\": container with ID starting with abd460ad8e7e2be12f729554a9dd30876ac2453d164c5a2fe827a10c4f9f9596 not found: ID does not exist" Nov 26 17:11:53 crc kubenswrapper[5010]: I1126 17:11:53.909200 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0bd0a59c-254c-4822-8671-17192366524c" path="/var/lib/kubelet/pods/0bd0a59c-254c-4822-8671-17192366524c/volumes" Nov 26 17:11:54 crc kubenswrapper[5010]: E1126 17:11:54.236389 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d881ca5_eedc_4457_a85e_252ebb895dc3.slice/crio-0f1feca0d3c4463923ffccb62a6eb23c1ab64a97d95993e97a4022ba7224924d.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:11:54 crc kubenswrapper[5010]: I1126 17:11:54.587675 5010 generic.go:334] "Generic (PLEG): container finished" podID="4d881ca5-eedc-4457-a85e-252ebb895dc3" containerID="0f1feca0d3c4463923ffccb62a6eb23c1ab64a97d95993e97a4022ba7224924d" exitCode=0 Nov 26 17:11:54 crc kubenswrapper[5010]: I1126 17:11:54.587808 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-hr695" event={"ID":"4d881ca5-eedc-4457-a85e-252ebb895dc3","Type":"ContainerDied","Data":"0f1feca0d3c4463923ffccb62a6eb23c1ab64a97d95993e97a4022ba7224924d"} Nov 26 17:11:55 crc kubenswrapper[5010]: I1126 17:11:55.598985 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-housekeeping-hr695" event={"ID":"4d881ca5-eedc-4457-a85e-252ebb895dc3","Type":"ContainerStarted","Data":"e99ea49b4ffffa5702511555e878a09325d4b1e43b3632c396e1c0f9212f0998"} Nov 26 17:11:55 crc kubenswrapper[5010]: I1126 17:11:55.599257 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-housekeeping-hr695" Nov 26 17:11:55 crc kubenswrapper[5010]: I1126 17:11:55.600846 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-92kdl" 
event={"ID":"dc7aa8a9-668c-485c-ad2b-6ba848d528b7","Type":"ContainerStarted","Data":"33cce03ce33dcaed88f406c8f7bd56670611d36b70e9b8e5fa72b94f647910d3"} Nov 26 17:11:55 crc kubenswrapper[5010]: I1126 17:11:55.620604 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-housekeeping-hr695" podStartSLOduration=4.102188391 podStartE2EDuration="5.620586105s" podCreationTimestamp="2025-11-26 17:11:50 +0000 UTC" firstStartedPulling="2025-11-26 17:11:51.323849059 +0000 UTC m=+6332.114566207" lastFinishedPulling="2025-11-26 17:11:52.842246773 +0000 UTC m=+6333.632963921" observedRunningTime="2025-11-26 17:11:55.618683318 +0000 UTC m=+6336.409400486" watchObservedRunningTime="2025-11-26 17:11:55.620586105 +0000 UTC m=+6336.411303253" Nov 26 17:11:56 crc kubenswrapper[5010]: I1126 17:11:56.621175 5010 generic.go:334] "Generic (PLEG): container finished" podID="dc7aa8a9-668c-485c-ad2b-6ba848d528b7" containerID="33cce03ce33dcaed88f406c8f7bd56670611d36b70e9b8e5fa72b94f647910d3" exitCode=0 Nov 26 17:11:56 crc kubenswrapper[5010]: I1126 17:11:56.621319 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-92kdl" event={"ID":"dc7aa8a9-668c-485c-ad2b-6ba848d528b7","Type":"ContainerDied","Data":"33cce03ce33dcaed88f406c8f7bd56670611d36b70e9b8e5fa72b94f647910d3"} Nov 26 17:11:57 crc kubenswrapper[5010]: I1126 17:11:57.666053 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/octavia-worker-92kdl" event={"ID":"dc7aa8a9-668c-485c-ad2b-6ba848d528b7","Type":"ContainerStarted","Data":"3612096ccdcd3965bdf1e7015795e68efab0a30578b2a08f1b4f31b1bfcd1d87"} Nov 26 17:11:57 crc kubenswrapper[5010]: I1126 17:11:57.666557 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/octavia-worker-92kdl" Nov 26 17:11:57 crc kubenswrapper[5010]: I1126 17:11:57.689545 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/octavia-worker-92kdl" podStartSLOduration=5.100409875 podStartE2EDuration="6.689523037s" podCreationTimestamp="2025-11-26 17:11:51 +0000 UTC" firstStartedPulling="2025-11-26 17:11:52.819522458 +0000 UTC m=+6333.610239606" lastFinishedPulling="2025-11-26 17:11:54.40863562 +0000 UTC m=+6335.199352768" observedRunningTime="2025-11-26 17:11:57.686067941 +0000 UTC m=+6338.476785089" watchObservedRunningTime="2025-11-26 17:11:57.689523037 +0000 UTC m=+6338.480240185" Nov 26 17:12:03 crc kubenswrapper[5010]: I1126 17:12:03.754889 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-healthmanager-rs97n" Nov 26 17:12:05 crc kubenswrapper[5010]: I1126 17:12:05.809458 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-housekeeping-hr695" Nov 26 17:12:07 crc kubenswrapper[5010]: I1126 17:12:07.002636 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/octavia-worker-92kdl" Nov 26 17:12:10 crc kubenswrapper[5010]: I1126 17:12:10.051272 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-kvm86"] Nov 26 17:12:10 crc kubenswrapper[5010]: I1126 17:12:10.064111 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-93db-account-create-update-5xvw8"] Nov 26 17:12:10 crc kubenswrapper[5010]: I1126 17:12:10.082561 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-kvm86"] Nov 26 17:12:10 crc kubenswrapper[5010]: I1126 17:12:10.095938 5010 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/barbican-93db-account-create-update-5xvw8"] Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.425042 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.425441 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.425508 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.427923 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.428052 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" gracePeriod=600 Nov 26 17:12:11 crc kubenswrapper[5010]: E1126 17:12:11.573044 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.814117 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" exitCode=0 Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.814167 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f"} Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.814221 5010 scope.go:117] "RemoveContainer" containerID="4b598b46f2cf6c5daaf375b8d9dc8672aba51e2bbf338cbfbf04472a425972f5" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.814989 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:12:11 crc kubenswrapper[5010]: E1126 17:12:11.815505 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.904361 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="94b20deb-30ad-476a-88e4-983c09558bd8" path="/var/lib/kubelet/pods/94b20deb-30ad-476a-88e4-983c09558bd8/volumes" Nov 26 17:12:11 crc kubenswrapper[5010]: I1126 17:12:11.905108 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb1a8606-74e2-4480-8c23-45610b2761f9" path="/var/lib/kubelet/pods/fb1a8606-74e2-4480-8c23-45610b2761f9/volumes" Nov 26 17:12:17 crc kubenswrapper[5010]: I1126 17:12:17.046486 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-r56n8"] Nov 26 17:12:17 crc kubenswrapper[5010]: I1126 17:12:17.060535 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-r56n8"] Nov 26 17:12:17 crc kubenswrapper[5010]: I1126 17:12:17.904955 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50cda769-1fdf-4bf8-9f93-1ac966b885ab" path="/var/lib/kubelet/pods/50cda769-1fdf-4bf8-9f93-1ac966b885ab/volumes" Nov 26 17:12:24 crc kubenswrapper[5010]: E1126 17:12:24.958589 5010 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.154:49928->38.102.83.154:42721: read tcp 38.102.83.154:49928->38.102.83.154:42721: read: connection reset by peer Nov 26 17:12:25 crc kubenswrapper[5010]: I1126 17:12:25.892078 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:12:25 crc kubenswrapper[5010]: E1126 17:12:25.892805 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:12:40 crc kubenswrapper[5010]: I1126 17:12:40.894043 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:12:40 crc kubenswrapper[5010]: E1126 17:12:40.894993 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:12:43 crc kubenswrapper[5010]: I1126 17:12:43.046526 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-b951-account-create-update-rck95"] Nov 26 17:12:43 crc kubenswrapper[5010]: I1126 17:12:43.057017 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-t2jh7"] Nov 26 17:12:43 crc kubenswrapper[5010]: I1126 17:12:43.069140 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-t2jh7"] Nov 26 17:12:43 crc kubenswrapper[5010]: I1126 17:12:43.078925 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/neutron-b951-account-create-update-rck95"] Nov 26 17:12:43 crc kubenswrapper[5010]: I1126 17:12:43.902841 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="406b38b3-8408-49c9-88ee-bf1c2f8a85d9" path="/var/lib/kubelet/pods/406b38b3-8408-49c9-88ee-bf1c2f8a85d9/volumes" Nov 26 17:12:43 crc kubenswrapper[5010]: I1126 17:12:43.904240 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45abb0ea-b89d-4041-9371-6c1433aa3123" path="/var/lib/kubelet/pods/45abb0ea-b89d-4041-9371-6c1433aa3123/volumes" Nov 26 17:12:52 crc kubenswrapper[5010]: I1126 17:12:52.030849 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-n8w48"] Nov 26 17:12:52 crc kubenswrapper[5010]: I1126 17:12:52.041770 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-n8w48"] Nov 26 17:12:53 crc kubenswrapper[5010]: I1126 17:12:53.909938 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da992071-826e-44cd-83f6-2c190a5a73f6" path="/var/lib/kubelet/pods/da992071-826e-44cd-83f6-2c190a5a73f6/volumes" Nov 26 17:12:54 crc kubenswrapper[5010]: I1126 17:12:54.894659 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:12:54 crc kubenswrapper[5010]: E1126 17:12:54.900124 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.950607 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-75ccc46d5-rl4jj"] Nov 26 17:12:57 crc kubenswrapper[5010]: E1126 17:12:57.954827 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="extract-content" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.954854 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="extract-content" Nov 26 17:12:57 crc kubenswrapper[5010]: E1126 17:12:57.954912 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="extract-utilities" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.954921 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="extract-utilities" Nov 26 17:12:57 crc kubenswrapper[5010]: E1126 17:12:57.954935 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="registry-server" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.954943 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="registry-server" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.955167 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bd0a59c-254c-4822-8671-17192366524c" containerName="registry-server" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.956536 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.960027 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-frfz8" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.960160 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.960196 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.965126 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 26 17:12:57 crc kubenswrapper[5010]: I1126 17:12:57.966488 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-75ccc46d5-rl4jj"] Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.004109 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-logs\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.004161 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-scripts\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.004244 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-config-data\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.004382 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-horizon-secret-key\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.004441 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-js6dr\" (UniqueName: \"kubernetes.io/projected/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-kube-api-access-js6dr\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.022874 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.023155 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-log" containerID="cri-o://8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3" gracePeriod=30 Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.023221 5010 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/glance-default-external-api-0" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-httpd" containerID="cri-o://11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec" gracePeriod=30 Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.061090 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-79c979d7fc-z28p4"] Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.063497 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.077911 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79c979d7fc-z28p4"] Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.109239 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-config-data\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.109345 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-horizon-secret-key\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.109383 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-js6dr\" (UniqueName: \"kubernetes.io/projected/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-kube-api-access-js6dr\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.109452 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-logs\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.109483 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-scripts\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.110517 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-scripts\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.111098 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-logs\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.111868 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-config-data\") pod 
\"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.118246 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-horizon-secret-key\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.131132 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-js6dr\" (UniqueName: \"kubernetes.io/projected/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-kube-api-access-js6dr\") pod \"horizon-75ccc46d5-rl4jj\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.162169 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.162518 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-log" containerID="cri-o://612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317" gracePeriod=30 Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.163180 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-httpd" containerID="cri-o://be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0" gracePeriod=30 Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.211085 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jlpl\" (UniqueName: \"kubernetes.io/projected/071ac2f8-0e86-473e-b024-253a9d667774-kube-api-access-4jlpl\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.211269 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-config-data\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.211328 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071ac2f8-0e86-473e-b024-253a9d667774-logs\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.211559 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-scripts\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.211656 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/071ac2f8-0e86-473e-b024-253a9d667774-horizon-secret-key\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.292487 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.314089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-config-data\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.314182 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071ac2f8-0e86-473e-b024-253a9d667774-logs\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.314262 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-scripts\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.314313 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/071ac2f8-0e86-473e-b024-253a9d667774-horizon-secret-key\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.314377 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jlpl\" (UniqueName: \"kubernetes.io/projected/071ac2f8-0e86-473e-b024-253a9d667774-kube-api-access-4jlpl\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.314875 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071ac2f8-0e86-473e-b024-253a9d667774-logs\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.315136 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-scripts\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.315484 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-config-data\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.319896 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: 
\"kubernetes.io/secret/071ac2f8-0e86-473e-b024-253a9d667774-horizon-secret-key\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.338206 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jlpl\" (UniqueName: \"kubernetes.io/projected/071ac2f8-0e86-473e-b024-253a9d667774-kube-api-access-4jlpl\") pod \"horizon-79c979d7fc-z28p4\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.351109 5010 generic.go:334] "Generic (PLEG): container finished" podID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerID="8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3" exitCode=143 Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.351164 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ccadce3b-18c9-4b3a-b06f-4f810ef81554","Type":"ContainerDied","Data":"8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3"} Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.353180 5010 generic.go:334] "Generic (PLEG): container finished" podID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerID="612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317" exitCode=143 Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.353208 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6fbf3e-afa8-4d34-965a-8ba491a85e81","Type":"ContainerDied","Data":"612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317"} Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.388585 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.787306 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-75ccc46d5-rl4jj"] Nov 26 17:12:58 crc kubenswrapper[5010]: I1126 17:12:58.903250 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79c979d7fc-z28p4"] Nov 26 17:12:58 crc kubenswrapper[5010]: W1126 17:12:58.917850 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod071ac2f8_0e86_473e_b024_253a9d667774.slice/crio-dc66dd668eed422b947c8ad1ecf8a65e3bf8c43a1887cb4871b355f3460087e2 WatchSource:0}: Error finding container dc66dd668eed422b947c8ad1ecf8a65e3bf8c43a1887cb4871b355f3460087e2: Status 404 returned error can't find the container with id dc66dd668eed422b947c8ad1ecf8a65e3bf8c43a1887cb4871b355f3460087e2 Nov 26 17:12:59 crc kubenswrapper[5010]: I1126 17:12:59.361982 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75ccc46d5-rl4jj" event={"ID":"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6","Type":"ContainerStarted","Data":"cc870ee15788d2b0b9921f0bfaba622841eac28c3fbee7b0d207bdb4ea74aa16"} Nov 26 17:12:59 crc kubenswrapper[5010]: I1126 17:12:59.362908 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79c979d7fc-z28p4" event={"ID":"071ac2f8-0e86-473e-b024-253a9d667774","Type":"ContainerStarted","Data":"dc66dd668eed422b947c8ad1ecf8a65e3bf8c43a1887cb4871b355f3460087e2"} Nov 26 17:12:59 crc kubenswrapper[5010]: I1126 17:12:59.982157 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-75ccc46d5-rl4jj"] Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.021813 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-d4b4bf468-nc42p"] Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.023579 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.054542 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-d4b4bf468-nc42p"] Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.056819 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.123497 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79c979d7fc-z28p4"] Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.151761 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-d8bcc7678-hw72b"] Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.154292 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163121 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-tls-certs\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163162 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-combined-ca-bundle\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163201 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-secret-key\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163222 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mrbq\" (UniqueName: \"kubernetes.io/projected/97333a10-733b-4dd1-bf22-fa0dd929a603-kube-api-access-4mrbq\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163298 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97333a10-733b-4dd1-bf22-fa0dd929a603-logs\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163316 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-config-data\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.163390 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-scripts\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.188248 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-d8bcc7678-hw72b"] Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.264938 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-combined-ca-bundle\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265008 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" 
(UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-secret-key\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265037 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mrbq\" (UniqueName: \"kubernetes.io/projected/97333a10-733b-4dd1-bf22-fa0dd929a603-kube-api-access-4mrbq\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265068 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-scripts\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265124 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-config-data\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265142 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97333a10-733b-4dd1-bf22-fa0dd929a603-logs\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265186 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-config-data\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265207 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-secret-key\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265227 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/646c3bd8-03a6-43c3-9226-9a68680d20e0-logs\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265246 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-combined-ca-bundle\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265279 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-scripts\") pod 
\"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265300 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-tls-certs\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265362 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-tls-certs\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.265385 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72h2c\" (UniqueName: \"kubernetes.io/projected/646c3bd8-03a6-43c3-9226-9a68680d20e0-kube-api-access-72h2c\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.266260 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97333a10-733b-4dd1-bf22-fa0dd929a603-logs\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.266601 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-scripts\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.267483 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-config-data\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.270738 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-combined-ca-bundle\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.273224 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-tls-certs\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.279196 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-secret-key\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: 
I1126 17:13:00.284983 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mrbq\" (UniqueName: \"kubernetes.io/projected/97333a10-733b-4dd1-bf22-fa0dd929a603-kube-api-access-4mrbq\") pod \"horizon-d4b4bf468-nc42p\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367040 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-config-data\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367088 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-secret-key\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367115 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/646c3bd8-03a6-43c3-9226-9a68680d20e0-logs\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367142 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-combined-ca-bundle\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367188 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-tls-certs\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367227 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72h2c\" (UniqueName: \"kubernetes.io/projected/646c3bd8-03a6-43c3-9226-9a68680d20e0-kube-api-access-72h2c\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367280 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-scripts\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.367957 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-scripts\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.368234 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/646c3bd8-03a6-43c3-9226-9a68680d20e0-logs\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.368307 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-config-data\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.371062 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-secret-key\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.377642 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-tls-certs\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.378289 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-combined-ca-bundle\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.387375 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72h2c\" (UniqueName: \"kubernetes.io/projected/646c3bd8-03a6-43c3-9226-9a68680d20e0-kube-api-access-72h2c\") pod \"horizon-d8bcc7678-hw72b\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.389600 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.483131 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.918755 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-d4b4bf468-nc42p"] Nov 26 17:13:00 crc kubenswrapper[5010]: W1126 17:13:00.919375 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod97333a10_733b_4dd1_bf22_fa0dd929a603.slice/crio-042ba81fda05085d1b904e9515d920b66061534d06cd45297df2359e09f0367e WatchSource:0}: Error finding container 042ba81fda05085d1b904e9515d920b66061534d06cd45297df2359e09f0367e: Status 404 returned error can't find the container with id 042ba81fda05085d1b904e9515d920b66061534d06cd45297df2359e09f0367e Nov 26 17:13:00 crc kubenswrapper[5010]: W1126 17:13:00.982035 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod646c3bd8_03a6_43c3_9226_9a68680d20e0.slice/crio-2a9e9f956345f04e6bf386f1505739f0788f6f0eb103b65024d34f0d5e4f12b2 WatchSource:0}: Error finding container 2a9e9f956345f04e6bf386f1505739f0788f6f0eb103b65024d34f0d5e4f12b2: Status 404 returned error can't find the container with id 2a9e9f956345f04e6bf386f1505739f0788f6f0eb103b65024d34f0d5e4f12b2 Nov 26 17:13:00 crc kubenswrapper[5010]: I1126 17:13:00.982289 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-d8bcc7678-hw72b"] Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.389661 5010 generic.go:334] "Generic (PLEG): container finished" podID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerID="11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec" exitCode=0 Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.389779 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ccadce3b-18c9-4b3a-b06f-4f810ef81554","Type":"ContainerDied","Data":"11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec"} Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.391310 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8bcc7678-hw72b" event={"ID":"646c3bd8-03a6-43c3-9226-9a68680d20e0","Type":"ContainerStarted","Data":"2a9e9f956345f04e6bf386f1505739f0788f6f0eb103b65024d34f0d5e4f12b2"} Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.392597 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d4b4bf468-nc42p" event={"ID":"97333a10-733b-4dd1-bf22-fa0dd929a603","Type":"ContainerStarted","Data":"042ba81fda05085d1b904e9515d920b66061534d06cd45297df2359e09f0367e"} Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.799167 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.906890 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-combined-ca-bundle\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.906980 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-scripts\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.907129 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-config-data\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.907159 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h7k6x\" (UniqueName: \"kubernetes.io/projected/ccadce3b-18c9-4b3a-b06f-4f810ef81554-kube-api-access-h7k6x\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.907236 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-httpd-run\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.907321 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-public-tls-certs\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.907403 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-logs\") pod \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\" (UID: \"ccadce3b-18c9-4b3a-b06f-4f810ef81554\") " Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.909735 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-logs" (OuterVolumeSpecName: "logs") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.909848 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.913010 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-scripts" (OuterVolumeSpecName: "scripts") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.913410 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.917139 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccadce3b-18c9-4b3a-b06f-4f810ef81554-kube-api-access-h7k6x" (OuterVolumeSpecName: "kube-api-access-h7k6x") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "kube-api-access-h7k6x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.956612 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:01 crc kubenswrapper[5010]: I1126 17:13:01.995445 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.009563 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-logs\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.009738 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fg789\" (UniqueName: \"kubernetes.io/projected/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-kube-api-access-fg789\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.009862 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-scripts\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.009905 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-config-data\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.009993 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-internal-tls-certs\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010025 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-combined-ca-bundle\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010066 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-httpd-run\") pod \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\" (UID: \"dd6fbf3e-afa8-4d34-965a-8ba491a85e81\") " Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010885 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010903 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h7k6x\" (UniqueName: \"kubernetes.io/projected/ccadce3b-18c9-4b3a-b06f-4f810ef81554-kube-api-access-h7k6x\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010919 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010930 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-public-tls-certs\") on node \"crc\" DevicePath 
\"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010942 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ccadce3b-18c9-4b3a-b06f-4f810ef81554-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.010953 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.012215 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-logs" (OuterVolumeSpecName: "logs") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.012516 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.018777 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-kube-api-access-fg789" (OuterVolumeSpecName: "kube-api-access-fg789") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "kube-api-access-fg789". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.020059 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-scripts" (OuterVolumeSpecName: "scripts") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.047517 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-config-data" (OuterVolumeSpecName: "config-data") pod "ccadce3b-18c9-4b3a-b06f-4f810ef81554" (UID: "ccadce3b-18c9-4b3a-b06f-4f810ef81554"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.061895 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.072019 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.092696 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-config-data" (OuterVolumeSpecName: "config-data") pod "dd6fbf3e-afa8-4d34-965a-8ba491a85e81" (UID: "dd6fbf3e-afa8-4d34-965a-8ba491a85e81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113235 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113282 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fg789\" (UniqueName: \"kubernetes.io/projected/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-kube-api-access-fg789\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113297 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ccadce3b-18c9-4b3a-b06f-4f810ef81554-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113311 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113322 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113334 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113347 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.113550 5010 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/dd6fbf3e-afa8-4d34-965a-8ba491a85e81-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.406437 5010 generic.go:334] "Generic (PLEG): container finished" podID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerID="be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0" exitCode=0 Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.406504 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6fbf3e-afa8-4d34-965a-8ba491a85e81","Type":"ContainerDied","Data":"be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0"} Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.406517 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.406530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"dd6fbf3e-afa8-4d34-965a-8ba491a85e81","Type":"ContainerDied","Data":"edbfd8f9f544d5e29ef14c0fbbb21c805b25797159dcc17cb03395ac3514338d"} Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.406549 5010 scope.go:117] "RemoveContainer" containerID="be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.409060 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"ccadce3b-18c9-4b3a-b06f-4f810ef81554","Type":"ContainerDied","Data":"0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c"} Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.409103 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.448266 5010 scope.go:117] "RemoveContainer" containerID="612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.459423 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.478526 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.508507 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: E1126 17:13:02.509073 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-log" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.509169 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-log" Nov 26 17:13:02 crc kubenswrapper[5010]: E1126 17:13:02.509270 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-httpd" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.509334 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-httpd" Nov 26 17:13:02 crc kubenswrapper[5010]: E1126 17:13:02.509426 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-httpd" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.509486 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-httpd" Nov 26 17:13:02 crc kubenswrapper[5010]: E1126 17:13:02.509548 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-log" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.509597 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-log" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.509895 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-httpd" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 
17:13:02.509987 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" containerName="glance-log" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.510063 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-httpd" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.510159 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" containerName="glance-log" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.511242 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.515239 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.515450 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.515751 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-7hf2b" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.515869 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.517144 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.526538 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.537284 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.538937 5010 scope.go:117] "RemoveContainer" containerID="be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0" Nov 26 17:13:02 crc kubenswrapper[5010]: E1126 17:13:02.539891 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0\": container with ID starting with be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0 not found: ID does not exist" containerID="be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.539936 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0"} err="failed to get container status \"be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0\": rpc error: code = NotFound desc = could not find container \"be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0\": container with ID starting with be48eeb3097fca20fa4855afd4ddab6101532a183ac8d5e03a53e68b1604b5a0 not found: ID does not exist" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.539963 5010 scope.go:117] "RemoveContainer" containerID="612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317" Nov 26 17:13:02 crc kubenswrapper[5010]: E1126 17:13:02.541965 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317\": container with ID starting with 612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317 not found: ID does not exist" containerID="612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.542054 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317"} err="failed to get container status \"612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317\": rpc error: code = NotFound desc = could not find container \"612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317\": container with ID starting with 612cc433a3231a0251e51a7ba026c7039ec867d6940a1dd09742bab808c40317 not found: ID does not exist" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.542087 5010 scope.go:117] "RemoveContainer" containerID="11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.552157 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.557158 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.557553 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.564201 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.564248 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624231 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624280 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8aacff2-d50d-4892-980a-6d708f73e1e4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624315 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8aacff2-d50d-4892-980a-6d708f73e1e4-logs\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624341 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-logs\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 
17:13:02.624362 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624388 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624413 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-scripts\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624432 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624497 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624539 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw2fb\" (UniqueName: \"kubernetes.io/projected/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-kube-api-access-bw2fb\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624610 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-config-data\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624639 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9zvt\" (UniqueName: \"kubernetes.io/projected/c8aacff2-d50d-4892-980a-6d708f73e1e4-kube-api-access-p9zvt\") pod \"glance-default-internal-api-0\" (UID: 
\"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.624690 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.726773 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727155 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw2fb\" (UniqueName: \"kubernetes.io/projected/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-kube-api-access-bw2fb\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727196 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-config-data\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727235 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9zvt\" (UniqueName: \"kubernetes.io/projected/c8aacff2-d50d-4892-980a-6d708f73e1e4-kube-api-access-p9zvt\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727289 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727328 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727351 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727374 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8aacff2-d50d-4892-980a-6d708f73e1e4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: 
\"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727405 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8aacff2-d50d-4892-980a-6d708f73e1e4-logs\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727430 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-logs\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727452 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727478 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727503 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-scripts\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727524 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727564 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.727776 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c8aacff2-d50d-4892-980a-6d708f73e1e4-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.728306 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-logs\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.728402 
5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c8aacff2-d50d-4892-980a-6d708f73e1e4-logs\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.732036 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.732082 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-scripts\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.732610 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.733414 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.733445 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.735805 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c8aacff2-d50d-4892-980a-6d708f73e1e4-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.746566 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.746591 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw2fb\" (UniqueName: \"kubernetes.io/projected/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-kube-api-access-bw2fb\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.747288 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9zvt\" (UniqueName: 
\"kubernetes.io/projected/c8aacff2-d50d-4892-980a-6d708f73e1e4-kube-api-access-p9zvt\") pod \"glance-default-internal-api-0\" (UID: \"c8aacff2-d50d-4892-980a-6d708f73e1e4\") " pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.749057 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9d6d21b5-fc5a-45ba-a975-f5bc02271e5f-config-data\") pod \"glance-default-external-api-0\" (UID: \"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f\") " pod="openstack/glance-default-external-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.843136 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:02 crc kubenswrapper[5010]: I1126 17:13:02.882937 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 26 17:13:03 crc kubenswrapper[5010]: I1126 17:13:03.912481 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccadce3b-18c9-4b3a-b06f-4f810ef81554" path="/var/lib/kubelet/pods/ccadce3b-18c9-4b3a-b06f-4f810ef81554/volumes" Nov 26 17:13:03 crc kubenswrapper[5010]: I1126 17:13:03.913643 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd6fbf3e-afa8-4d34-965a-8ba491a85e81" path="/var/lib/kubelet/pods/dd6fbf3e-afa8-4d34-965a-8ba491a85e81/volumes" Nov 26 17:13:07 crc kubenswrapper[5010]: I1126 17:13:07.115106 5010 scope.go:117] "RemoveContainer" containerID="1f878451f9937ca0ca2e65f23b817b0e59ac96dfcdfdc005a1de740391f61c16" Nov 26 17:13:07 crc kubenswrapper[5010]: I1126 17:13:07.905311 5010 scope.go:117] "RemoveContainer" containerID="11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec" Nov 26 17:13:07 crc kubenswrapper[5010]: I1126 17:13:07.947152 5010 scope.go:117] "RemoveContainer" containerID="8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3" Nov 26 17:13:07 crc kubenswrapper[5010]: E1126 17:13:07.970052 5010 log.go:32] "RemoveContainer from runtime service failed" err="rpc error: code = Unknown desc = failed to delete container k8s_glance-httpd_glance-default-external-api-0_openstack_ccadce3b-18c9-4b3a-b06f-4f810ef81554_0 in pod sandbox 0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c from index: no such id: '11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec'" containerID="11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec" Nov 26 17:13:07 crc kubenswrapper[5010]: E1126 17:13:07.970133 5010 kuberuntime_gc.go:150] "Failed to remove container" err="rpc error: code = Unknown desc = failed to delete container k8s_glance-httpd_glance-default-external-api-0_openstack_ccadce3b-18c9-4b3a-b06f-4f810ef81554_0 in pod sandbox 0e8b55217f5a3c34dabf3fac70f2e72b41a3796443cafceb4f67c2b140ae191c from index: no such id: '11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec'" containerID="11542e647fbc67f6409945610606b4c188cc03a35d24521b69eb67c64cb0cfec" Nov 26 17:13:07 crc kubenswrapper[5010]: I1126 17:13:07.970170 5010 scope.go:117] "RemoveContainer" containerID="1663156357ed77d79325bdcb551b5e9315b982db39ccf9ab300103426d7c4c3b" Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.155008 5010 scope.go:117] "RemoveContainer" containerID="858004dceebc4f3004ebfeca114387f89ecd904d9d305bdd635b1a180f4ee9a8" Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.238349 5010 scope.go:117] "RemoveContainer" 
containerID="ab3bf55cbefcc6c1b5b29a6a8cee042efff1b282e105f6f8745610a99cc06ae7" Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.300920 5010 scope.go:117] "RemoveContainer" containerID="61375f021fb229dc2350ee3a57be8bea7576eff82ae49749769a6984bbb22f84" Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.333267 5010 scope.go:117] "RemoveContainer" containerID="8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3" Nov 26 17:13:08 crc kubenswrapper[5010]: E1126 17:13:08.333792 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3\": container with ID starting with 8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3 not found: ID does not exist" containerID="8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3" Nov 26 17:13:08 crc kubenswrapper[5010]: E1126 17:13:08.333848 5010 kuberuntime_gc.go:150] "Failed to remove container" err="failed to get container status \"8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3\": rpc error: code = NotFound desc = could not find container \"8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3\": container with ID starting with 8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3 not found: ID does not exist" containerID="8066544c17105f3d17c9128b5b19e66c207eb107d5f00c4c6a87da63c299acf3" Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.333876 5010 scope.go:117] "RemoveContainer" containerID="8ac5149bc067ad8522373358e51efa7482eee091b169d6ce0826dacb4bdadadc" Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.481483 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79c979d7fc-z28p4" event={"ID":"071ac2f8-0e86-473e-b024-253a9d667774","Type":"ContainerStarted","Data":"8a9ae17d0c72b76154a49886af38ad1a9c7350ef965d8dc00862fddf8a25126a"} Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.498863 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8bcc7678-hw72b" event={"ID":"646c3bd8-03a6-43c3-9226-9a68680d20e0","Type":"ContainerStarted","Data":"5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1"} Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.503624 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75ccc46d5-rl4jj" event={"ID":"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6","Type":"ContainerStarted","Data":"f0ce7d609b7be530c9945684c6da15b9f3e19ea0a62282cb812b93b4b5e153e9"} Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.509754 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d4b4bf468-nc42p" event={"ID":"97333a10-733b-4dd1-bf22-fa0dd929a603","Type":"ContainerStarted","Data":"c18815d65b2570321424e1b7990f1e9725d92081a423b40d728b0c7006509430"} Nov 26 17:13:08 crc kubenswrapper[5010]: W1126 17:13:08.595885 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d6d21b5_fc5a_45ba_a975_f5bc02271e5f.slice/crio-ed83b2d5098b4ec62a7d8711b935be28e547870ce0bd45d8d2a8975e014fc46c WatchSource:0}: Error finding container ed83b2d5098b4ec62a7d8711b935be28e547870ce0bd45d8d2a8975e014fc46c: Status 404 returned error can't find the container with id ed83b2d5098b4ec62a7d8711b935be28e547870ce0bd45d8d2a8975e014fc46c Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.597512 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/glance-default-external-api-0"] Nov 26 17:13:08 crc kubenswrapper[5010]: I1126 17:13:08.641464 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.523972 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8bcc7678-hw72b" event={"ID":"646c3bd8-03a6-43c3-9226-9a68680d20e0","Type":"ContainerStarted","Data":"423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.528257 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75ccc46d5-rl4jj" event={"ID":"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6","Type":"ContainerStarted","Data":"fefc0f5af22fccc349294b546fe32e9659f76140e8d5e461b4f2aef418202991"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.528335 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-75ccc46d5-rl4jj" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon-log" containerID="cri-o://f0ce7d609b7be530c9945684c6da15b9f3e19ea0a62282cb812b93b4b5e153e9" gracePeriod=30 Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.528394 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-75ccc46d5-rl4jj" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon" containerID="cri-o://fefc0f5af22fccc349294b546fe32e9659f76140e8d5e461b4f2aef418202991" gracePeriod=30 Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.530228 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f","Type":"ContainerStarted","Data":"2ea9611809b900fa23182ca5bb1ac21d596e94c45a640f857c2985c08cab4872"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.530589 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f","Type":"ContainerStarted","Data":"ed83b2d5098b4ec62a7d8711b935be28e547870ce0bd45d8d2a8975e014fc46c"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.533501 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d4b4bf468-nc42p" event={"ID":"97333a10-733b-4dd1-bf22-fa0dd929a603","Type":"ContainerStarted","Data":"7cfd97cd29773dd7b4c80ae6af261eb20becaea38e0cb84374e5637dd750e63f"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.536044 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c8aacff2-d50d-4892-980a-6d708f73e1e4","Type":"ContainerStarted","Data":"00c93bc51844e90736df051faa554f7bae17efd744288bf738bb28c2de175476"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.536082 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c8aacff2-d50d-4892-980a-6d708f73e1e4","Type":"ContainerStarted","Data":"a070c7becac5c4054467490511f83548696585304146106311fa18d6032164fa"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.538792 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79c979d7fc-z28p4" event={"ID":"071ac2f8-0e86-473e-b024-253a9d667774","Type":"ContainerStarted","Data":"40e1dd9331189ae872aac4a59837b1da1cb22706fb949eeeea321ca477e20888"} Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.538950 5010 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openstack/horizon-79c979d7fc-z28p4" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon-log" containerID="cri-o://8a9ae17d0c72b76154a49886af38ad1a9c7350ef965d8dc00862fddf8a25126a" gracePeriod=30 Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.539231 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-79c979d7fc-z28p4" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon" containerID="cri-o://40e1dd9331189ae872aac4a59837b1da1cb22706fb949eeeea321ca477e20888" gracePeriod=30 Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.565967 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-d8bcc7678-hw72b" podStartSLOduration=2.507839592 podStartE2EDuration="9.565948528s" podCreationTimestamp="2025-11-26 17:13:00 +0000 UTC" firstStartedPulling="2025-11-26 17:13:00.984043884 +0000 UTC m=+6401.774761032" lastFinishedPulling="2025-11-26 17:13:08.04215283 +0000 UTC m=+6408.832869968" observedRunningTime="2025-11-26 17:13:09.539620033 +0000 UTC m=+6410.330337191" watchObservedRunningTime="2025-11-26 17:13:09.565948528 +0000 UTC m=+6410.356665666" Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.586600 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-79c979d7fc-z28p4" podStartSLOduration=2.462869084 podStartE2EDuration="11.58657516s" podCreationTimestamp="2025-11-26 17:12:58 +0000 UTC" firstStartedPulling="2025-11-26 17:12:58.919680715 +0000 UTC m=+6399.710397863" lastFinishedPulling="2025-11-26 17:13:08.043386791 +0000 UTC m=+6408.834103939" observedRunningTime="2025-11-26 17:13:09.575009443 +0000 UTC m=+6410.365726601" watchObservedRunningTime="2025-11-26 17:13:09.58657516 +0000 UTC m=+6410.377292308" Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.603655 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-d4b4bf468-nc42p" podStartSLOduration=3.320695579 podStartE2EDuration="10.603638725s" podCreationTimestamp="2025-11-26 17:12:59 +0000 UTC" firstStartedPulling="2025-11-26 17:13:00.922027802 +0000 UTC m=+6401.712744950" lastFinishedPulling="2025-11-26 17:13:08.204970948 +0000 UTC m=+6408.995688096" observedRunningTime="2025-11-26 17:13:09.59216889 +0000 UTC m=+6410.382886038" watchObservedRunningTime="2025-11-26 17:13:09.603638725 +0000 UTC m=+6410.394355873" Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.668885 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-75ccc46d5-rl4jj" podStartSLOduration=3.41964441 podStartE2EDuration="12.668866417s" podCreationTimestamp="2025-11-26 17:12:57 +0000 UTC" firstStartedPulling="2025-11-26 17:12:58.793018765 +0000 UTC m=+6399.583735913" lastFinishedPulling="2025-11-26 17:13:08.042240772 +0000 UTC m=+6408.832957920" observedRunningTime="2025-11-26 17:13:09.663808111 +0000 UTC m=+6410.454525309" watchObservedRunningTime="2025-11-26 17:13:09.668866417 +0000 UTC m=+6410.459583565" Nov 26 17:13:09 crc kubenswrapper[5010]: I1126 17:13:09.907902 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:13:09 crc kubenswrapper[5010]: E1126 17:13:09.908253 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.390262 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.390563 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.483577 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.483626 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.552231 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"9d6d21b5-fc5a-45ba-a975-f5bc02271e5f","Type":"ContainerStarted","Data":"535e1ecaef49e2abf75698c71e615d22ffa56a829180320a1f7b7057af6ee192"} Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.555054 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c8aacff2-d50d-4892-980a-6d708f73e1e4","Type":"ContainerStarted","Data":"cb264970f06f34bd2e8bc28433e76f1a88ca95c99228df703745c4715bcd0844"} Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.573750 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.573732996 podStartE2EDuration="8.573732996s" podCreationTimestamp="2025-11-26 17:13:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:13:10.571681945 +0000 UTC m=+6411.362399113" watchObservedRunningTime="2025-11-26 17:13:10.573732996 +0000 UTC m=+6411.364450154" Nov 26 17:13:10 crc kubenswrapper[5010]: I1126 17:13:10.605452 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.605432794 podStartE2EDuration="8.605432794s" podCreationTimestamp="2025-11-26 17:13:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:13:10.603340962 +0000 UTC m=+6411.394058150" watchObservedRunningTime="2025-11-26 17:13:10.605432794 +0000 UTC m=+6411.396149942" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.844570 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.844990 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.884843 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.884914 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.887004 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.926924 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.928833 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 17:13:12 crc kubenswrapper[5010]: I1126 17:13:12.968785 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 26 17:13:13 crc kubenswrapper[5010]: I1126 17:13:13.582750 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:13 crc kubenswrapper[5010]: I1126 17:13:13.583111 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:13 crc kubenswrapper[5010]: I1126 17:13:13.583124 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 17:13:13 crc kubenswrapper[5010]: I1126 17:13:13.583134 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 26 17:13:16 crc kubenswrapper[5010]: I1126 17:13:16.903462 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:16 crc kubenswrapper[5010]: I1126 17:13:16.974262 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 17:13:16 crc kubenswrapper[5010]: I1126 17:13:16.974672 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 26 17:13:16 crc kubenswrapper[5010]: I1126 17:13:16.976074 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 26 17:13:18 crc kubenswrapper[5010]: I1126 17:13:18.292700 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:13:18 crc kubenswrapper[5010]: I1126 17:13:18.389260 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:13:20 crc kubenswrapper[5010]: I1126 17:13:20.392114 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-d4b4bf468-nc42p" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.133:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.133:8443: connect: connection refused" Nov 26 17:13:20 crc kubenswrapper[5010]: I1126 17:13:20.487730 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-d8bcc7678-hw72b" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.134:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.134:8443: connect: connection refused" Nov 26 17:13:22 crc kubenswrapper[5010]: I1126 17:13:22.892842 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:13:22 crc kubenswrapper[5010]: E1126 17:13:22.893534 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:13:32 crc kubenswrapper[5010]: I1126 17:13:32.240687 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:32 crc kubenswrapper[5010]: I1126 17:13:32.263106 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:34 crc kubenswrapper[5010]: I1126 17:13:34.013215 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:13:34 crc kubenswrapper[5010]: I1126 17:13:34.173677 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:13:34 crc kubenswrapper[5010]: I1126 17:13:34.232366 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-d4b4bf468-nc42p"] Nov 26 17:13:34 crc kubenswrapper[5010]: I1126 17:13:34.812920 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-d4b4bf468-nc42p" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon-log" containerID="cri-o://c18815d65b2570321424e1b7990f1e9725d92081a423b40d728b0c7006509430" gracePeriod=30 Nov 26 17:13:34 crc kubenswrapper[5010]: I1126 17:13:34.813483 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-d4b4bf468-nc42p" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" containerID="cri-o://7cfd97cd29773dd7b4c80ae6af261eb20becaea38e0cb84374e5637dd750e63f" gracePeriod=30 Nov 26 17:13:36 crc kubenswrapper[5010]: I1126 17:13:36.893098 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:13:36 crc kubenswrapper[5010]: E1126 17:13:36.894062 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:13:38 crc kubenswrapper[5010]: I1126 17:13:38.858340 5010 generic.go:334] "Generic (PLEG): container finished" podID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerID="7cfd97cd29773dd7b4c80ae6af261eb20becaea38e0cb84374e5637dd750e63f" exitCode=0 Nov 26 17:13:38 crc kubenswrapper[5010]: I1126 17:13:38.858406 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d4b4bf468-nc42p" event={"ID":"97333a10-733b-4dd1-bf22-fa0dd929a603","Type":"ContainerDied","Data":"7cfd97cd29773dd7b4c80ae6af261eb20becaea38e0cb84374e5637dd750e63f"} Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.875485 5010 generic.go:334] "Generic (PLEG): container finished" podID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerID="fefc0f5af22fccc349294b546fe32e9659f76140e8d5e461b4f2aef418202991" exitCode=137 Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.875965 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerID="f0ce7d609b7be530c9945684c6da15b9f3e19ea0a62282cb812b93b4b5e153e9" exitCode=137 Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.876017 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75ccc46d5-rl4jj" event={"ID":"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6","Type":"ContainerDied","Data":"fefc0f5af22fccc349294b546fe32e9659f76140e8d5e461b4f2aef418202991"} Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.876047 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75ccc46d5-rl4jj" event={"ID":"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6","Type":"ContainerDied","Data":"f0ce7d609b7be530c9945684c6da15b9f3e19ea0a62282cb812b93b4b5e153e9"} Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.878113 5010 generic.go:334] "Generic (PLEG): container finished" podID="071ac2f8-0e86-473e-b024-253a9d667774" containerID="40e1dd9331189ae872aac4a59837b1da1cb22706fb949eeeea321ca477e20888" exitCode=137 Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.878145 5010 generic.go:334] "Generic (PLEG): container finished" podID="071ac2f8-0e86-473e-b024-253a9d667774" containerID="8a9ae17d0c72b76154a49886af38ad1a9c7350ef965d8dc00862fddf8a25126a" exitCode=137 Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.878146 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79c979d7fc-z28p4" event={"ID":"071ac2f8-0e86-473e-b024-253a9d667774","Type":"ContainerDied","Data":"40e1dd9331189ae872aac4a59837b1da1cb22706fb949eeeea321ca477e20888"} Nov 26 17:13:39 crc kubenswrapper[5010]: I1126 17:13:39.878175 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79c979d7fc-z28p4" event={"ID":"071ac2f8-0e86-473e-b024-253a9d667774","Type":"ContainerDied","Data":"8a9ae17d0c72b76154a49886af38ad1a9c7350ef965d8dc00862fddf8a25126a"} Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.155299 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.164329 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282347 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/071ac2f8-0e86-473e-b024-253a9d667774-horizon-secret-key\") pod \"071ac2f8-0e86-473e-b024-253a9d667774\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282470 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-horizon-secret-key\") pod \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282504 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-scripts\") pod \"071ac2f8-0e86-473e-b024-253a9d667774\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282551 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-scripts\") pod \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282582 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-config-data\") pod \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282640 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4jlpl\" (UniqueName: \"kubernetes.io/projected/071ac2f8-0e86-473e-b024-253a9d667774-kube-api-access-4jlpl\") pod \"071ac2f8-0e86-473e-b024-253a9d667774\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282682 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-logs\") pod \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282742 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-js6dr\" (UniqueName: \"kubernetes.io/projected/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-kube-api-access-js6dr\") pod \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\" (UID: \"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282849 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071ac2f8-0e86-473e-b024-253a9d667774-logs\") pod \"071ac2f8-0e86-473e-b024-253a9d667774\" (UID: \"071ac2f8-0e86-473e-b024-253a9d667774\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.282906 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-config-data\") pod \"071ac2f8-0e86-473e-b024-253a9d667774\" (UID: 
\"071ac2f8-0e86-473e-b024-253a9d667774\") " Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.284282 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-logs" (OuterVolumeSpecName: "logs") pod "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" (UID: "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.284340 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/071ac2f8-0e86-473e-b024-253a9d667774-logs" (OuterVolumeSpecName: "logs") pod "071ac2f8-0e86-473e-b024-253a9d667774" (UID: "071ac2f8-0e86-473e-b024-253a9d667774"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.288632 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/071ac2f8-0e86-473e-b024-253a9d667774-kube-api-access-4jlpl" (OuterVolumeSpecName: "kube-api-access-4jlpl") pod "071ac2f8-0e86-473e-b024-253a9d667774" (UID: "071ac2f8-0e86-473e-b024-253a9d667774"). InnerVolumeSpecName "kube-api-access-4jlpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.288637 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/071ac2f8-0e86-473e-b024-253a9d667774-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "071ac2f8-0e86-473e-b024-253a9d667774" (UID: "071ac2f8-0e86-473e-b024-253a9d667774"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.288924 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" (UID: "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.297007 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-kube-api-access-js6dr" (OuterVolumeSpecName: "kube-api-access-js6dr") pod "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" (UID: "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6"). InnerVolumeSpecName "kube-api-access-js6dr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.313080 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-scripts" (OuterVolumeSpecName: "scripts") pod "071ac2f8-0e86-473e-b024-253a9d667774" (UID: "071ac2f8-0e86-473e-b024-253a9d667774"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.313101 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-config-data" (OuterVolumeSpecName: "config-data") pod "071ac2f8-0e86-473e-b024-253a9d667774" (UID: "071ac2f8-0e86-473e-b024-253a9d667774"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.316330 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-config-data" (OuterVolumeSpecName: "config-data") pod "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" (UID: "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.321399 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-scripts" (OuterVolumeSpecName: "scripts") pod "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" (UID: "fe33350a-e0bc-434f-84a2-eeffaeeaa2d6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.385751 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/071ac2f8-0e86-473e-b024-253a9d667774-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.385809 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.385973 5010 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/071ac2f8-0e86-473e-b024-253a9d667774-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.385998 5010 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.386098 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/071ac2f8-0e86-473e-b024-253a9d667774-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.386120 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.386138 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.386155 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4jlpl\" (UniqueName: \"kubernetes.io/projected/071ac2f8-0e86-473e-b024-253a9d667774-kube-api-access-4jlpl\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.386171 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.386191 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-js6dr\" (UniqueName: \"kubernetes.io/projected/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6-kube-api-access-js6dr\") on node \"crc\" DevicePath \"\"" Nov 26 
17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.390841 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-d4b4bf468-nc42p" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.133:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.133:8443: connect: connection refused" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.888186 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-75ccc46d5-rl4jj" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.888350 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-75ccc46d5-rl4jj" event={"ID":"fe33350a-e0bc-434f-84a2-eeffaeeaa2d6","Type":"ContainerDied","Data":"cc870ee15788d2b0b9921f0bfaba622841eac28c3fbee7b0d207bdb4ea74aa16"} Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.888789 5010 scope.go:117] "RemoveContainer" containerID="fefc0f5af22fccc349294b546fe32e9659f76140e8d5e461b4f2aef418202991" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.892617 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79c979d7fc-z28p4" event={"ID":"071ac2f8-0e86-473e-b024-253a9d667774","Type":"ContainerDied","Data":"dc66dd668eed422b947c8ad1ecf8a65e3bf8c43a1887cb4871b355f3460087e2"} Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.892679 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79c979d7fc-z28p4" Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.944442 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-75ccc46d5-rl4jj"] Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.957445 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-75ccc46d5-rl4jj"] Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.966277 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79c979d7fc-z28p4"] Nov 26 17:13:40 crc kubenswrapper[5010]: I1126 17:13:40.974606 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-79c979d7fc-z28p4"] Nov 26 17:13:41 crc kubenswrapper[5010]: I1126 17:13:41.084007 5010 scope.go:117] "RemoveContainer" containerID="f0ce7d609b7be530c9945684c6da15b9f3e19ea0a62282cb812b93b4b5e153e9" Nov 26 17:13:41 crc kubenswrapper[5010]: I1126 17:13:41.117168 5010 scope.go:117] "RemoveContainer" containerID="40e1dd9331189ae872aac4a59837b1da1cb22706fb949eeeea321ca477e20888" Nov 26 17:13:41 crc kubenswrapper[5010]: I1126 17:13:41.335741 5010 scope.go:117] "RemoveContainer" containerID="8a9ae17d0c72b76154a49886af38ad1a9c7350ef965d8dc00862fddf8a25126a" Nov 26 17:13:41 crc kubenswrapper[5010]: I1126 17:13:41.911591 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="071ac2f8-0e86-473e-b024-253a9d667774" path="/var/lib/kubelet/pods/071ac2f8-0e86-473e-b024-253a9d667774/volumes" Nov 26 17:13:41 crc kubenswrapper[5010]: I1126 17:13:41.913098 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" path="/var/lib/kubelet/pods/fe33350a-e0bc-434f-84a2-eeffaeeaa2d6/volumes" Nov 26 17:13:50 crc kubenswrapper[5010]: I1126 17:13:50.391153 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-d4b4bf468-nc42p" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" probeResult="failure" output="Get 
\"https://10.217.1.133:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.133:8443: connect: connection refused" Nov 26 17:13:51 crc kubenswrapper[5010]: I1126 17:13:51.892996 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:13:51 crc kubenswrapper[5010]: E1126 17:13:51.894153 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:14:00 crc kubenswrapper[5010]: I1126 17:14:00.391047 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-d4b4bf468-nc42p" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.133:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.133:8443: connect: connection refused" Nov 26 17:14:00 crc kubenswrapper[5010]: I1126 17:14:00.391773 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:14:02 crc kubenswrapper[5010]: I1126 17:14:02.894246 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:14:02 crc kubenswrapper[5010]: E1126 17:14:02.895308 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.138285 5010 generic.go:334] "Generic (PLEG): container finished" podID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerID="c18815d65b2570321424e1b7990f1e9725d92081a423b40d728b0c7006509430" exitCode=137 Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.138392 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d4b4bf468-nc42p" event={"ID":"97333a10-733b-4dd1-bf22-fa0dd929a603","Type":"ContainerDied","Data":"c18815d65b2570321424e1b7990f1e9725d92081a423b40d728b0c7006509430"} Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.291041 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.403652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-scripts\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.403862 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-tls-certs\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.403962 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-combined-ca-bundle\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.404131 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-secret-key\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.404263 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97333a10-733b-4dd1-bf22-fa0dd929a603-logs\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.404394 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mrbq\" (UniqueName: \"kubernetes.io/projected/97333a10-733b-4dd1-bf22-fa0dd929a603-kube-api-access-4mrbq\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.404493 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-config-data\") pod \"97333a10-733b-4dd1-bf22-fa0dd929a603\" (UID: \"97333a10-733b-4dd1-bf22-fa0dd929a603\") " Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.405204 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/97333a10-733b-4dd1-bf22-fa0dd929a603-logs" (OuterVolumeSpecName: "logs") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.419047 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/97333a10-733b-4dd1-bf22-fa0dd929a603-kube-api-access-4mrbq" (OuterVolumeSpecName: "kube-api-access-4mrbq") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "kube-api-access-4mrbq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.420097 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.436996 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-config-data" (OuterVolumeSpecName: "config-data") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.440347 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-scripts" (OuterVolumeSpecName: "scripts") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.443785 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.466406 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "97333a10-733b-4dd1-bf22-fa0dd929a603" (UID: "97333a10-733b-4dd1-bf22-fa0dd929a603"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507737 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507787 5010 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507798 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507808 5010 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/97333a10-733b-4dd1-bf22-fa0dd929a603-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507818 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/97333a10-733b-4dd1-bf22-fa0dd929a603-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507826 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mrbq\" (UniqueName: \"kubernetes.io/projected/97333a10-733b-4dd1-bf22-fa0dd929a603-kube-api-access-4mrbq\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:05 crc kubenswrapper[5010]: I1126 17:14:05.507835 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/97333a10-733b-4dd1-bf22-fa0dd929a603-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:06 crc kubenswrapper[5010]: I1126 17:14:06.163781 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d4b4bf468-nc42p" event={"ID":"97333a10-733b-4dd1-bf22-fa0dd929a603","Type":"ContainerDied","Data":"042ba81fda05085d1b904e9515d920b66061534d06cd45297df2359e09f0367e"} Nov 26 17:14:06 crc kubenswrapper[5010]: I1126 17:14:06.163931 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-d4b4bf468-nc42p" Nov 26 17:14:06 crc kubenswrapper[5010]: I1126 17:14:06.164240 5010 scope.go:117] "RemoveContainer" containerID="7cfd97cd29773dd7b4c80ae6af261eb20becaea38e0cb84374e5637dd750e63f" Nov 26 17:14:06 crc kubenswrapper[5010]: I1126 17:14:06.209436 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-d4b4bf468-nc42p"] Nov 26 17:14:06 crc kubenswrapper[5010]: I1126 17:14:06.221290 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-d4b4bf468-nc42p"] Nov 26 17:14:06 crc kubenswrapper[5010]: I1126 17:14:06.359777 5010 scope.go:117] "RemoveContainer" containerID="c18815d65b2570321424e1b7990f1e9725d92081a423b40d728b0c7006509430" Nov 26 17:14:07 crc kubenswrapper[5010]: I1126 17:14:07.904006 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" path="/var/lib/kubelet/pods/97333a10-733b-4dd1-bf22-fa0dd929a603/volumes" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.359095 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5f65b9f7c4-6mgjh"] Nov 26 17:14:16 crc kubenswrapper[5010]: E1126 17:14:16.360724 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.360744 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: E1126 17:14:16.360784 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.360791 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: E1126 17:14:16.360808 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.360814 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: E1126 17:14:16.360844 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.360850 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: E1126 17:14:16.360878 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.360884 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: E1126 17:14:16.360907 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.360917 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.361278 5010 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.361307 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.361333 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.361355 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="071ac2f8-0e86-473e-b024-253a9d667774" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.361381 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="97333a10-733b-4dd1-bf22-fa0dd929a603" containerName="horizon-log" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.361405 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe33350a-e0bc-434f-84a2-eeffaeeaa2d6" containerName="horizon" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.363240 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.381664 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-horizon-tls-certs\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.381879 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-horizon-secret-key\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.381932 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zmls\" (UniqueName: \"kubernetes.io/projected/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-kube-api-access-7zmls\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.382172 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-combined-ca-bundle\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.382234 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-config-data\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.382451 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-scripts\") 
pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.382696 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-logs\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.404383 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5f65b9f7c4-6mgjh"] Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484178 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-horizon-tls-certs\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484266 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-horizon-secret-key\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484290 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zmls\" (UniqueName: \"kubernetes.io/projected/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-kube-api-access-7zmls\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484339 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-combined-ca-bundle\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484360 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-config-data\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484386 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-scripts\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484453 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-logs\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.484867 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-logs\") pod 
\"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.485921 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-scripts\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.486213 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-config-data\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.490331 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-horizon-secret-key\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.490605 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-horizon-tls-certs\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.491283 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-combined-ca-bundle\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.502791 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zmls\" (UniqueName: \"kubernetes.io/projected/d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b-kube-api-access-7zmls\") pod \"horizon-5f65b9f7c4-6mgjh\" (UID: \"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b\") " pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:16 crc kubenswrapper[5010]: I1126 17:14:16.688477 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.181278 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5f65b9f7c4-6mgjh"] Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.293065 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f65b9f7c4-6mgjh" event={"ID":"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b","Type":"ContainerStarted","Data":"85191aeedeb0f138598055177a177a6ab06d03a6da69b978a1b1015c5c461aae"} Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.777608 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-nj5lp"] Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.779231 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.789423 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-nj5lp"] Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.809727 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1875832b-7338-4ab0-bb6d-445884217d0e-operator-scripts\") pod \"heat-db-create-nj5lp\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.809804 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jhlvx\" (UniqueName: \"kubernetes.io/projected/1875832b-7338-4ab0-bb6d-445884217d0e-kube-api-access-jhlvx\") pod \"heat-db-create-nj5lp\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.891803 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:14:17 crc kubenswrapper[5010]: E1126 17:14:17.892140 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.911874 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1875832b-7338-4ab0-bb6d-445884217d0e-operator-scripts\") pod \"heat-db-create-nj5lp\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.911934 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jhlvx\" (UniqueName: \"kubernetes.io/projected/1875832b-7338-4ab0-bb6d-445884217d0e-kube-api-access-jhlvx\") pod \"heat-db-create-nj5lp\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.913151 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1875832b-7338-4ab0-bb6d-445884217d0e-operator-scripts\") pod \"heat-db-create-nj5lp\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.920689 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-2bc7-account-create-update-n6r49"] Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.922057 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.925865 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.926306 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-2bc7-account-create-update-n6r49"] Nov 26 17:14:17 crc kubenswrapper[5010]: I1126 17:14:17.962005 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jhlvx\" (UniqueName: \"kubernetes.io/projected/1875832b-7338-4ab0-bb6d-445884217d0e-kube-api-access-jhlvx\") pod \"heat-db-create-nj5lp\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.014043 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tdjfr\" (UniqueName: \"kubernetes.io/projected/31f53da5-3952-4e07-8495-c864c959ac7d-kube-api-access-tdjfr\") pod \"heat-2bc7-account-create-update-n6r49\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.014205 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31f53da5-3952-4e07-8495-c864c959ac7d-operator-scripts\") pod \"heat-2bc7-account-create-update-n6r49\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.115857 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tdjfr\" (UniqueName: \"kubernetes.io/projected/31f53da5-3952-4e07-8495-c864c959ac7d-kube-api-access-tdjfr\") pod \"heat-2bc7-account-create-update-n6r49\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.115977 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31f53da5-3952-4e07-8495-c864c959ac7d-operator-scripts\") pod \"heat-2bc7-account-create-update-n6r49\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.116587 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31f53da5-3952-4e07-8495-c864c959ac7d-operator-scripts\") pod \"heat-2bc7-account-create-update-n6r49\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.118532 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.131803 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tdjfr\" (UniqueName: \"kubernetes.io/projected/31f53da5-3952-4e07-8495-c864c959ac7d-kube-api-access-tdjfr\") pod \"heat-2bc7-account-create-update-n6r49\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.293450 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.305618 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f65b9f7c4-6mgjh" event={"ID":"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b","Type":"ContainerStarted","Data":"295623965bfde6dbb48dd7bbb4e4bf77e6679bb10d3a7fcf67d5b9c76f8f9bdd"} Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.305840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5f65b9f7c4-6mgjh" event={"ID":"d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b","Type":"ContainerStarted","Data":"61a3802503e6a18af956c911680b324819890ee49e1fb4ccb995d87710647902"} Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.343748 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5f65b9f7c4-6mgjh" podStartSLOduration=2.343729512 podStartE2EDuration="2.343729512s" podCreationTimestamp="2025-11-26 17:14:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:18.331026496 +0000 UTC m=+6479.121743654" watchObservedRunningTime="2025-11-26 17:14:18.343729512 +0000 UTC m=+6479.134446680" Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.624552 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-nj5lp"] Nov 26 17:14:18 crc kubenswrapper[5010]: I1126 17:14:18.763831 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-2bc7-account-create-update-n6r49"] Nov 26 17:14:18 crc kubenswrapper[5010]: W1126 17:14:18.776219 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31f53da5_3952_4e07_8495_c864c959ac7d.slice/crio-c559cc6f924e94d48592a89b935c5a4f5e56351df15c3b63cecd1867f56923de WatchSource:0}: Error finding container c559cc6f924e94d48592a89b935c5a4f5e56351df15c3b63cecd1867f56923de: Status 404 returned error can't find the container with id c559cc6f924e94d48592a89b935c5a4f5e56351df15c3b63cecd1867f56923de Nov 26 17:14:19 crc kubenswrapper[5010]: I1126 17:14:19.321566 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2bc7-account-create-update-n6r49" event={"ID":"31f53da5-3952-4e07-8495-c864c959ac7d","Type":"ContainerDied","Data":"b4199fd063c93076abb075ea068dd7d553b535286a3fbc7e2e601f09adad8acd"} Nov 26 17:14:19 crc kubenswrapper[5010]: I1126 17:14:19.321459 5010 generic.go:334] "Generic (PLEG): container finished" podID="31f53da5-3952-4e07-8495-c864c959ac7d" containerID="b4199fd063c93076abb075ea068dd7d553b535286a3fbc7e2e601f09adad8acd" exitCode=0 Nov 26 17:14:19 crc kubenswrapper[5010]: I1126 17:14:19.322305 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2bc7-account-create-update-n6r49" 
event={"ID":"31f53da5-3952-4e07-8495-c864c959ac7d","Type":"ContainerStarted","Data":"c559cc6f924e94d48592a89b935c5a4f5e56351df15c3b63cecd1867f56923de"} Nov 26 17:14:19 crc kubenswrapper[5010]: I1126 17:14:19.325038 5010 generic.go:334] "Generic (PLEG): container finished" podID="1875832b-7338-4ab0-bb6d-445884217d0e" containerID="efddce0f7bba9050137d414ab8c3df851156bd00e8187e9d654f9e5f4a3b6987" exitCode=0 Nov 26 17:14:19 crc kubenswrapper[5010]: I1126 17:14:19.325124 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-nj5lp" event={"ID":"1875832b-7338-4ab0-bb6d-445884217d0e","Type":"ContainerDied","Data":"efddce0f7bba9050137d414ab8c3df851156bd00e8187e9d654f9e5f4a3b6987"} Nov 26 17:14:19 crc kubenswrapper[5010]: I1126 17:14:19.325181 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-nj5lp" event={"ID":"1875832b-7338-4ab0-bb6d-445884217d0e","Type":"ContainerStarted","Data":"b7c9051438d880bf7ead1f0feed2357f2dbc7cbd4ab5141621f55811b0262b8d"} Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.048191 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-xl79j"] Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.060407 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-xl79j"] Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.834808 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.843890 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.877137 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tdjfr\" (UniqueName: \"kubernetes.io/projected/31f53da5-3952-4e07-8495-c864c959ac7d-kube-api-access-tdjfr\") pod \"31f53da5-3952-4e07-8495-c864c959ac7d\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.877191 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhlvx\" (UniqueName: \"kubernetes.io/projected/1875832b-7338-4ab0-bb6d-445884217d0e-kube-api-access-jhlvx\") pod \"1875832b-7338-4ab0-bb6d-445884217d0e\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.877306 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31f53da5-3952-4e07-8495-c864c959ac7d-operator-scripts\") pod \"31f53da5-3952-4e07-8495-c864c959ac7d\" (UID: \"31f53da5-3952-4e07-8495-c864c959ac7d\") " Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.877371 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1875832b-7338-4ab0-bb6d-445884217d0e-operator-scripts\") pod \"1875832b-7338-4ab0-bb6d-445884217d0e\" (UID: \"1875832b-7338-4ab0-bb6d-445884217d0e\") " Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.877940 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1875832b-7338-4ab0-bb6d-445884217d0e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1875832b-7338-4ab0-bb6d-445884217d0e" (UID: "1875832b-7338-4ab0-bb6d-445884217d0e"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.878246 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1875832b-7338-4ab0-bb6d-445884217d0e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.878913 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31f53da5-3952-4e07-8495-c864c959ac7d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "31f53da5-3952-4e07-8495-c864c959ac7d" (UID: "31f53da5-3952-4e07-8495-c864c959ac7d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.896958 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1875832b-7338-4ab0-bb6d-445884217d0e-kube-api-access-jhlvx" (OuterVolumeSpecName: "kube-api-access-jhlvx") pod "1875832b-7338-4ab0-bb6d-445884217d0e" (UID: "1875832b-7338-4ab0-bb6d-445884217d0e"). InnerVolumeSpecName "kube-api-access-jhlvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.898125 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31f53da5-3952-4e07-8495-c864c959ac7d-kube-api-access-tdjfr" (OuterVolumeSpecName: "kube-api-access-tdjfr") pod "31f53da5-3952-4e07-8495-c864c959ac7d" (UID: "31f53da5-3952-4e07-8495-c864c959ac7d"). InnerVolumeSpecName "kube-api-access-tdjfr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.980170 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tdjfr\" (UniqueName: \"kubernetes.io/projected/31f53da5-3952-4e07-8495-c864c959ac7d-kube-api-access-tdjfr\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.980205 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhlvx\" (UniqueName: \"kubernetes.io/projected/1875832b-7338-4ab0-bb6d-445884217d0e-kube-api-access-jhlvx\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:20 crc kubenswrapper[5010]: I1126 17:14:20.980218 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/31f53da5-3952-4e07-8495-c864c959ac7d-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.036306 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-b021-account-create-update-tz28t"] Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.048564 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-b021-account-create-update-tz28t"] Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.352030 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-2bc7-account-create-update-n6r49" Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.352408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-2bc7-account-create-update-n6r49" event={"ID":"31f53da5-3952-4e07-8495-c864c959ac7d","Type":"ContainerDied","Data":"c559cc6f924e94d48592a89b935c5a4f5e56351df15c3b63cecd1867f56923de"} Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.352449 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c559cc6f924e94d48592a89b935c5a4f5e56351df15c3b63cecd1867f56923de" Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.354408 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-nj5lp" event={"ID":"1875832b-7338-4ab0-bb6d-445884217d0e","Type":"ContainerDied","Data":"b7c9051438d880bf7ead1f0feed2357f2dbc7cbd4ab5141621f55811b0262b8d"} Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.354467 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7c9051438d880bf7ead1f0feed2357f2dbc7cbd4ab5141621f55811b0262b8d" Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.354497 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-nj5lp" Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.907016 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14d26833-10fc-4fa3-9dfd-a0497e5dc238" path="/var/lib/kubelet/pods/14d26833-10fc-4fa3-9dfd-a0497e5dc238/volumes" Nov 26 17:14:21 crc kubenswrapper[5010]: I1126 17:14:21.908184 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f499269-4475-44ec-8b84-4979a96f2412" path="/var/lib/kubelet/pods/1f499269-4475-44ec-8b84-4979a96f2412/volumes" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.368330 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-8k2tw"] Nov 26 17:14:23 crc kubenswrapper[5010]: E1126 17:14:23.369110 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1875832b-7338-4ab0-bb6d-445884217d0e" containerName="mariadb-database-create" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.369125 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="1875832b-7338-4ab0-bb6d-445884217d0e" containerName="mariadb-database-create" Nov 26 17:14:23 crc kubenswrapper[5010]: E1126 17:14:23.369157 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f53da5-3952-4e07-8495-c864c959ac7d" containerName="mariadb-account-create-update" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.369163 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f53da5-3952-4e07-8495-c864c959ac7d" containerName="mariadb-account-create-update" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.369373 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f53da5-3952-4e07-8495-c864c959ac7d" containerName="mariadb-account-create-update" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.369398 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="1875832b-7338-4ab0-bb6d-445884217d0e" containerName="mariadb-database-create" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.370150 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.384332 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-8k2tw"] Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.384724 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-tqdnm" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.385566 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.530628 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh9dk\" (UniqueName: \"kubernetes.io/projected/0f1bd435-d206-440d-8054-83fe2688501a-kube-api-access-qh9dk\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.530926 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-config-data\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.531206 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-combined-ca-bundle\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.634071 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-combined-ca-bundle\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.634387 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh9dk\" (UniqueName: \"kubernetes.io/projected/0f1bd435-d206-440d-8054-83fe2688501a-kube-api-access-qh9dk\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.635436 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-config-data\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.662440 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh9dk\" (UniqueName: \"kubernetes.io/projected/0f1bd435-d206-440d-8054-83fe2688501a-kube-api-access-qh9dk\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.663239 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-config-data\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " 
pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.696215 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-combined-ca-bundle\") pod \"heat-db-sync-8k2tw\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:23 crc kubenswrapper[5010]: I1126 17:14:23.991811 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:24 crc kubenswrapper[5010]: W1126 17:14:24.507375 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0f1bd435_d206_440d_8054_83fe2688501a.slice/crio-b1e4f610f67256460519607f46b945bdc213cf413dcf673946d5425d3c3dfe27 WatchSource:0}: Error finding container b1e4f610f67256460519607f46b945bdc213cf413dcf673946d5425d3c3dfe27: Status 404 returned error can't find the container with id b1e4f610f67256460519607f46b945bdc213cf413dcf673946d5425d3c3dfe27 Nov 26 17:14:24 crc kubenswrapper[5010]: I1126 17:14:24.507900 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-8k2tw"] Nov 26 17:14:25 crc kubenswrapper[5010]: I1126 17:14:25.400782 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-8k2tw" event={"ID":"0f1bd435-d206-440d-8054-83fe2688501a","Type":"ContainerStarted","Data":"b1e4f610f67256460519607f46b945bdc213cf413dcf673946d5425d3c3dfe27"} Nov 26 17:14:26 crc kubenswrapper[5010]: I1126 17:14:26.689271 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:26 crc kubenswrapper[5010]: I1126 17:14:26.689636 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:31 crc kubenswrapper[5010]: I1126 17:14:31.892499 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:14:31 crc kubenswrapper[5010]: E1126 17:14:31.894570 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:14:33 crc kubenswrapper[5010]: I1126 17:14:33.489561 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-8k2tw" event={"ID":"0f1bd435-d206-440d-8054-83fe2688501a","Type":"ContainerStarted","Data":"f3fc7137ccf729cc44a262af335e9f6165e56831e915fcc52cb9fd5b5218216a"} Nov 26 17:14:33 crc kubenswrapper[5010]: I1126 17:14:33.514010 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-8k2tw" podStartSLOduration=1.88635185 podStartE2EDuration="10.513969371s" podCreationTimestamp="2025-11-26 17:14:23 +0000 UTC" firstStartedPulling="2025-11-26 17:14:24.510559836 +0000 UTC m=+6485.301276984" lastFinishedPulling="2025-11-26 17:14:33.138177357 +0000 UTC m=+6493.928894505" observedRunningTime="2025-11-26 17:14:33.507566192 +0000 UTC m=+6494.298283360" watchObservedRunningTime="2025-11-26 17:14:33.513969371 +0000 UTC m=+6494.304686519" Nov 26 17:14:34 crc 
kubenswrapper[5010]: I1126 17:14:34.047008 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-qzk97"] Nov 26 17:14:34 crc kubenswrapper[5010]: I1126 17:14:34.061014 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-qzk97"] Nov 26 17:14:35 crc kubenswrapper[5010]: I1126 17:14:35.510794 5010 generic.go:334] "Generic (PLEG): container finished" podID="0f1bd435-d206-440d-8054-83fe2688501a" containerID="f3fc7137ccf729cc44a262af335e9f6165e56831e915fcc52cb9fd5b5218216a" exitCode=0 Nov 26 17:14:35 crc kubenswrapper[5010]: I1126 17:14:35.511183 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-8k2tw" event={"ID":"0f1bd435-d206-440d-8054-83fe2688501a","Type":"ContainerDied","Data":"f3fc7137ccf729cc44a262af335e9f6165e56831e915fcc52cb9fd5b5218216a"} Nov 26 17:14:35 crc kubenswrapper[5010]: I1126 17:14:35.902410 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4545040-6d0b-4c50-87bf-7963256037cd" path="/var/lib/kubelet/pods/d4545040-6d0b-4c50-87bf-7963256037cd/volumes" Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.693578 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5f65b9f7c4-6mgjh" podUID="d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.137:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.137:8443: connect: connection refused" Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.900663 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.914677 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-config-data\") pod \"0f1bd435-d206-440d-8054-83fe2688501a\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.918746 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-combined-ca-bundle\") pod \"0f1bd435-d206-440d-8054-83fe2688501a\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.918846 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh9dk\" (UniqueName: \"kubernetes.io/projected/0f1bd435-d206-440d-8054-83fe2688501a-kube-api-access-qh9dk\") pod \"0f1bd435-d206-440d-8054-83fe2688501a\" (UID: \"0f1bd435-d206-440d-8054-83fe2688501a\") " Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.922874 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f1bd435-d206-440d-8054-83fe2688501a-kube-api-access-qh9dk" (OuterVolumeSpecName: "kube-api-access-qh9dk") pod "0f1bd435-d206-440d-8054-83fe2688501a" (UID: "0f1bd435-d206-440d-8054-83fe2688501a"). InnerVolumeSpecName "kube-api-access-qh9dk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:36 crc kubenswrapper[5010]: I1126 17:14:36.947104 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0f1bd435-d206-440d-8054-83fe2688501a" (UID: "0f1bd435-d206-440d-8054-83fe2688501a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.004054 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-config-data" (OuterVolumeSpecName: "config-data") pod "0f1bd435-d206-440d-8054-83fe2688501a" (UID: "0f1bd435-d206-440d-8054-83fe2688501a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.023356 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.023399 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh9dk\" (UniqueName: \"kubernetes.io/projected/0f1bd435-d206-440d-8054-83fe2688501a-kube-api-access-qh9dk\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.023413 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0f1bd435-d206-440d-8054-83fe2688501a-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.542781 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-8k2tw" event={"ID":"0f1bd435-d206-440d-8054-83fe2688501a","Type":"ContainerDied","Data":"b1e4f610f67256460519607f46b945bdc213cf413dcf673946d5425d3c3dfe27"} Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.542830 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-8k2tw" Nov 26 17:14:37 crc kubenswrapper[5010]: I1126 17:14:37.542838 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1e4f610f67256460519607f46b945bdc213cf413dcf673946d5425d3c3dfe27" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.585913 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-699778bfb5-v4krn"] Nov 26 17:14:38 crc kubenswrapper[5010]: E1126 17:14:38.586802 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f1bd435-d206-440d-8054-83fe2688501a" containerName="heat-db-sync" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.586819 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f1bd435-d206-440d-8054-83fe2688501a" containerName="heat-db-sync" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.587078 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f1bd435-d206-440d-8054-83fe2688501a" containerName="heat-db-sync" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.588003 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.593450 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.593810 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.594040 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-tqdnm" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.595979 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-699778bfb5-v4krn"] Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.656053 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nc2q\" (UniqueName: \"kubernetes.io/projected/75a0fd7a-951c-4ebb-baee-445422257b73-kube-api-access-6nc2q\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.656134 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.656189 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data-custom\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.656209 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-combined-ca-bundle\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.717393 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-856555b5b6-jhzph"] Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.731151 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.764840 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.767842 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data-custom\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.767894 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.767977 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rlkp\" (UniqueName: \"kubernetes.io/projected/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-kube-api-access-9rlkp\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.768019 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nc2q\" (UniqueName: \"kubernetes.io/projected/75a0fd7a-951c-4ebb-baee-445422257b73-kube-api-access-6nc2q\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.768104 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.768169 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-combined-ca-bundle\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.768211 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data-custom\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.768234 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-combined-ca-bundle\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.796291 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.797466 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-combined-ca-bundle\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.812681 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nc2q\" (UniqueName: \"kubernetes.io/projected/75a0fd7a-951c-4ebb-baee-445422257b73-kube-api-access-6nc2q\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.812765 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-856555b5b6-jhzph"] Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.813755 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data-custom\") pod \"heat-engine-699778bfb5-v4krn\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.824527 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-677df475ff-fgs6n"] Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.825775 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.828306 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.852575 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-677df475ff-fgs6n"] Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.888815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4mmvq\" (UniqueName: \"kubernetes.io/projected/90605dba-0c35-4fa5-b783-859a2d9f0e1f-kube-api-access-4mmvq\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.888845 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-combined-ca-bundle\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.888902 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-combined-ca-bundle\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.889053 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data-custom\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.889128 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data-custom\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.889149 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.889165 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.889198 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rlkp\" (UniqueName: \"kubernetes.io/projected/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-kube-api-access-9rlkp\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " 
pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.893428 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data-custom\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.895109 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.896802 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-combined-ca-bundle\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.910245 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.914985 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rlkp\" (UniqueName: \"kubernetes.io/projected/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-kube-api-access-9rlkp\") pod \"heat-cfnapi-856555b5b6-jhzph\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.991813 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.992308 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4mmvq\" (UniqueName: \"kubernetes.io/projected/90605dba-0c35-4fa5-b783-859a2d9f0e1f-kube-api-access-4mmvq\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.992328 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-combined-ca-bundle\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:38 crc kubenswrapper[5010]: I1126 17:14:38.992397 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data-custom\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.002917 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data-custom\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.002957 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.003889 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-combined-ca-bundle\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.014929 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4mmvq\" (UniqueName: \"kubernetes.io/projected/90605dba-0c35-4fa5-b783-859a2d9f0e1f-kube-api-access-4mmvq\") pod \"heat-api-677df475ff-fgs6n\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.092878 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.182010 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.442617 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-699778bfb5-v4krn"] Nov 26 17:14:39 crc kubenswrapper[5010]: W1126 17:14:39.461394 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod75a0fd7a_951c_4ebb_baee_445422257b73.slice/crio-f6e4efd72ce27593ed221d38ebf28d7cd02466be67cfd1aabee0b7260c04cfe2 WatchSource:0}: Error finding container f6e4efd72ce27593ed221d38ebf28d7cd02466be67cfd1aabee0b7260c04cfe2: Status 404 returned error can't find the container with id f6e4efd72ce27593ed221d38ebf28d7cd02466be67cfd1aabee0b7260c04cfe2 Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.568222 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-699778bfb5-v4krn" event={"ID":"75a0fd7a-951c-4ebb-baee-445422257b73","Type":"ContainerStarted","Data":"f6e4efd72ce27593ed221d38ebf28d7cd02466be67cfd1aabee0b7260c04cfe2"} Nov 26 17:14:39 crc kubenswrapper[5010]: W1126 17:14:39.595705 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod90605dba_0c35_4fa5_b783_859a2d9f0e1f.slice/crio-60adcb5d8049fc14deb065a35bd2f733e1b6f4e62447a4cbe38fdaf61a5bcff3 WatchSource:0}: Error finding container 60adcb5d8049fc14deb065a35bd2f733e1b6f4e62447a4cbe38fdaf61a5bcff3: Status 404 returned error can't find the container with id 60adcb5d8049fc14deb065a35bd2f733e1b6f4e62447a4cbe38fdaf61a5bcff3 Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.600989 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-677df475ff-fgs6n"] Nov 26 17:14:39 crc kubenswrapper[5010]: I1126 17:14:39.672952 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/heat-cfnapi-856555b5b6-jhzph"] Nov 26 17:14:40 crc kubenswrapper[5010]: I1126 17:14:40.590419 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-699778bfb5-v4krn" event={"ID":"75a0fd7a-951c-4ebb-baee-445422257b73","Type":"ContainerStarted","Data":"c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459"} Nov 26 17:14:40 crc kubenswrapper[5010]: I1126 17:14:40.591097 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:40 crc kubenswrapper[5010]: I1126 17:14:40.591947 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-677df475ff-fgs6n" event={"ID":"90605dba-0c35-4fa5-b783-859a2d9f0e1f","Type":"ContainerStarted","Data":"60adcb5d8049fc14deb065a35bd2f733e1b6f4e62447a4cbe38fdaf61a5bcff3"} Nov 26 17:14:40 crc kubenswrapper[5010]: I1126 17:14:40.597560 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-856555b5b6-jhzph" event={"ID":"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa","Type":"ContainerStarted","Data":"31b53f65d0b6f4c1c49061ee3f0e491972d187186071ea6c60ede646740d0808"} Nov 26 17:14:40 crc kubenswrapper[5010]: I1126 17:14:40.646208 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-699778bfb5-v4krn" podStartSLOduration=2.646182499 podStartE2EDuration="2.646182499s" podCreationTimestamp="2025-11-26 17:14:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:40.635957205 +0000 UTC m=+6501.426674353" watchObservedRunningTime="2025-11-26 17:14:40.646182499 +0000 UTC m=+6501.436899647" Nov 26 17:14:42 crc kubenswrapper[5010]: I1126 17:14:42.891314 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:14:42 crc kubenswrapper[5010]: E1126 17:14:42.892846 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:14:43 crc kubenswrapper[5010]: I1126 17:14:43.645957 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-677df475ff-fgs6n" event={"ID":"90605dba-0c35-4fa5-b783-859a2d9f0e1f","Type":"ContainerStarted","Data":"ba743c0129581c0ebb93b02da298bfd8bee2ffb16d2112c2ef3ce2ffe6c4133f"} Nov 26 17:14:43 crc kubenswrapper[5010]: I1126 17:14:43.646427 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:43 crc kubenswrapper[5010]: I1126 17:14:43.647697 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-856555b5b6-jhzph" event={"ID":"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa","Type":"ContainerStarted","Data":"3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec"} Nov 26 17:14:43 crc kubenswrapper[5010]: I1126 17:14:43.647867 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:43 crc kubenswrapper[5010]: I1126 17:14:43.710480 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/heat-api-677df475ff-fgs6n" podStartSLOduration=2.276073116 podStartE2EDuration="5.71045543s" podCreationTimestamp="2025-11-26 17:14:38 +0000 UTC" firstStartedPulling="2025-11-26 17:14:39.598865678 +0000 UTC m=+6500.389582826" lastFinishedPulling="2025-11-26 17:14:43.033247992 +0000 UTC m=+6503.823965140" observedRunningTime="2025-11-26 17:14:43.702568894 +0000 UTC m=+6504.493286042" watchObservedRunningTime="2025-11-26 17:14:43.71045543 +0000 UTC m=+6504.501172578" Nov 26 17:14:43 crc kubenswrapper[5010]: I1126 17:14:43.732131 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-856555b5b6-jhzph" podStartSLOduration=2.376762961 podStartE2EDuration="5.732111399s" podCreationTimestamp="2025-11-26 17:14:38 +0000 UTC" firstStartedPulling="2025-11-26 17:14:39.675259598 +0000 UTC m=+6500.465976756" lastFinishedPulling="2025-11-26 17:14:43.030608046 +0000 UTC m=+6503.821325194" observedRunningTime="2025-11-26 17:14:43.725149626 +0000 UTC m=+6504.515866774" watchObservedRunningTime="2025-11-26 17:14:43.732111399 +0000 UTC m=+6504.522828547" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.354497 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-75d4958f6-ntkst"] Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.357283 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.376727 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-dcc45f6bf-vtg9c"] Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.379249 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.390676 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-75d4958f6-ntkst"] Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.406386 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-dcc45f6bf-vtg9c"] Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.423180 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-6c5768474-47jdj"] Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.426238 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434185 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434309 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx4td\" (UniqueName: \"kubernetes.io/projected/ca0aa660-11d7-4ab4-8edf-cead47f8c396-kube-api-access-nx4td\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434351 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-combined-ca-bundle\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434449 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfdxl\" (UniqueName: \"kubernetes.io/projected/38f71da4-1f9a-445e-af17-081b174ed1ee-kube-api-access-bfdxl\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434548 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8bnt\" (UniqueName: \"kubernetes.io/projected/35650a48-d859-4f5e-a8cf-3aaea836183f-kube-api-access-m8bnt\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434584 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-config-data\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434613 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-combined-ca-bundle\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434679 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-combined-ca-bundle\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434911 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data-custom\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.434950 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.435004 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-config-data-custom\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.435047 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data-custom\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.527509 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6c5768474-47jdj"] Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537393 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537474 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx4td\" (UniqueName: \"kubernetes.io/projected/ca0aa660-11d7-4ab4-8edf-cead47f8c396-kube-api-access-nx4td\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537501 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-combined-ca-bundle\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537547 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfdxl\" (UniqueName: \"kubernetes.io/projected/38f71da4-1f9a-445e-af17-081b174ed1ee-kube-api-access-bfdxl\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537591 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8bnt\" (UniqueName: \"kubernetes.io/projected/35650a48-d859-4f5e-a8cf-3aaea836183f-kube-api-access-m8bnt\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 
17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537611 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-config-data\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537629 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-combined-ca-bundle\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537666 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-combined-ca-bundle\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537801 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data-custom\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537823 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537861 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-config-data-custom\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.537887 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data-custom\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.550074 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-combined-ca-bundle\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.552983 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data-custom\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.553622 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.553783 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.553968 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-config-data-custom\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.554328 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data-custom\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.554646 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-config-data\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.555671 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8bnt\" (UniqueName: \"kubernetes.io/projected/35650a48-d859-4f5e-a8cf-3aaea836183f-kube-api-access-m8bnt\") pod \"heat-api-6c5768474-47jdj\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.557106 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-combined-ca-bundle\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.558685 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx4td\" (UniqueName: \"kubernetes.io/projected/ca0aa660-11d7-4ab4-8edf-cead47f8c396-kube-api-access-nx4td\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.569632 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca0aa660-11d7-4ab4-8edf-cead47f8c396-combined-ca-bundle\") pod \"heat-engine-75d4958f6-ntkst\" (UID: \"ca0aa660-11d7-4ab4-8edf-cead47f8c396\") " pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.572961 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfdxl\" (UniqueName: 
\"kubernetes.io/projected/38f71da4-1f9a-445e-af17-081b174ed1ee-kube-api-access-bfdxl\") pod \"heat-cfnapi-dcc45f6bf-vtg9c\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.683798 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.703842 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:45 crc kubenswrapper[5010]: I1126 17:14:45.756974 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.224052 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-75d4958f6-ntkst"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.320075 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-dcc45f6bf-vtg9c"] Nov 26 17:14:46 crc kubenswrapper[5010]: W1126 17:14:46.331185 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38f71da4_1f9a_445e_af17_081b174ed1ee.slice/crio-3d4071f70a6029829a7b3e22840d2deb4e215c339af10462854994159c81072c WatchSource:0}: Error finding container 3d4071f70a6029829a7b3e22840d2deb4e215c339af10462854994159c81072c: Status 404 returned error can't find the container with id 3d4071f70a6029829a7b3e22840d2deb4e215c339af10462854994159c81072c Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.401470 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6c5768474-47jdj"] Nov 26 17:14:46 crc kubenswrapper[5010]: W1126 17:14:46.404813 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod35650a48_d859_4f5e_a8cf_3aaea836183f.slice/crio-f6a833df6599054d993d4e3c0d3ba36c2938dcc0c09274685414e4382d525232 WatchSource:0}: Error finding container f6a833df6599054d993d4e3c0d3ba36c2938dcc0c09274685414e4382d525232: Status 404 returned error can't find the container with id f6a833df6599054d993d4e3c0d3ba36c2938dcc0c09274685414e4382d525232 Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.645994 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-677df475ff-fgs6n"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.646248 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-api-677df475ff-fgs6n" podUID="90605dba-0c35-4fa5-b783-859a2d9f0e1f" containerName="heat-api" containerID="cri-o://ba743c0129581c0ebb93b02da298bfd8bee2ffb16d2112c2ef3ce2ffe6c4133f" gracePeriod=60 Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.657898 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-856555b5b6-jhzph"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.661564 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-856555b5b6-jhzph" podUID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" containerName="heat-cfnapi" containerID="cri-o://3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec" gracePeriod=60 Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.684850 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75d4958f6-ntkst" 
event={"ID":"ca0aa660-11d7-4ab4-8edf-cead47f8c396","Type":"ContainerStarted","Data":"c8677f57f5fd7bfad645dff0a803c259b2a95f099df78831b298bafc539ded47"} Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.684902 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75d4958f6-ntkst" event={"ID":"ca0aa660-11d7-4ab4-8edf-cead47f8c396","Type":"ContainerStarted","Data":"13b5e9fb3fb3e08f1c50f883e822c99a9c8409da8412a8af2f52994a358db140"} Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.685079 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.687779 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-98fd67cb-2wpwn"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.689607 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.691899 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.692530 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.695112 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" event={"ID":"38f71da4-1f9a-445e-af17-081b174ed1ee","Type":"ContainerStarted","Data":"81a1bd823d55c7c4e5b0f78fc863e708505d6795c6d8e431cbd467126027325c"} Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.695161 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" event={"ID":"38f71da4-1f9a-445e-af17-081b174ed1ee","Type":"ContainerStarted","Data":"3d4071f70a6029829a7b3e22840d2deb4e215c339af10462854994159c81072c"} Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.697632 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.701626 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6c5768474-47jdj" event={"ID":"35650a48-d859-4f5e-a8cf-3aaea836183f","Type":"ContainerStarted","Data":"f6a833df6599054d993d4e3c0d3ba36c2938dcc0c09274685414e4382d525232"} Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.704374 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.710527 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-9d4c7768f-xlvpp"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.712139 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.717658 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.717939 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.723773 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-98fd67cb-2wpwn"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.734696 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-75d4958f6-ntkst" podStartSLOduration=1.734677985 podStartE2EDuration="1.734677985s" podCreationTimestamp="2025-11-26 17:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:46.710132365 +0000 UTC m=+6507.500849523" watchObservedRunningTime="2025-11-26 17:14:46.734677985 +0000 UTC m=+6507.525395133" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.737724 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-9d4c7768f-xlvpp"] Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.749150 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" podStartSLOduration=1.749128715 podStartE2EDuration="1.749128715s" podCreationTimestamp="2025-11-26 17:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:46.731876896 +0000 UTC m=+6507.522594044" watchObservedRunningTime="2025-11-26 17:14:46.749128715 +0000 UTC m=+6507.539845863" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.765537 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-6c5768474-47jdj" podStartSLOduration=1.7655127720000001 podStartE2EDuration="1.765512772s" podCreationTimestamp="2025-11-26 17:14:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:46.754210951 +0000 UTC m=+6507.544928109" watchObservedRunningTime="2025-11-26 17:14:46.765512772 +0000 UTC m=+6507.556229920" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768623 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-public-tls-certs\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768670 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-config-data-custom\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768699 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-config-data-custom\") pod 
\"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768767 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-combined-ca-bundle\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768783 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-combined-ca-bundle\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768808 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-internal-tls-certs\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768828 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5hcq\" (UniqueName: \"kubernetes.io/projected/3503617f-ad5f-4f7a-b67f-03d8cc42e360-kube-api-access-x5hcq\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768863 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-config-data\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768909 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-public-tls-certs\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768942 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4r2bj\" (UniqueName: \"kubernetes.io/projected/a14ed063-d477-4b0d-8d6b-064deba25b74-kube-api-access-4r2bj\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.768995 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-config-data\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.769059 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-internal-tls-certs\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870482 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-public-tls-certs\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870794 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-config-data-custom\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870823 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-config-data-custom\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870851 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-combined-ca-bundle\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870868 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-combined-ca-bundle\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870894 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-internal-tls-certs\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870917 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5hcq\" (UniqueName: \"kubernetes.io/projected/3503617f-ad5f-4f7a-b67f-03d8cc42e360-kube-api-access-x5hcq\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870946 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-config-data\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.870979 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-public-tls-certs\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.871015 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4r2bj\" (UniqueName: \"kubernetes.io/projected/a14ed063-d477-4b0d-8d6b-064deba25b74-kube-api-access-4r2bj\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.871058 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-config-data\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.871099 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-internal-tls-certs\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.876891 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-combined-ca-bundle\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.877801 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-public-tls-certs\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.880546 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-config-data\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.880557 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-config-data-custom\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.881245 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-public-tls-certs\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.881312 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-config-data-custom\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: 
\"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.881732 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-config-data\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.882328 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a14ed063-d477-4b0d-8d6b-064deba25b74-internal-tls-certs\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.882522 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-internal-tls-certs\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.888467 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3503617f-ad5f-4f7a-b67f-03d8cc42e360-combined-ca-bundle\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.897245 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5hcq\" (UniqueName: \"kubernetes.io/projected/3503617f-ad5f-4f7a-b67f-03d8cc42e360-kube-api-access-x5hcq\") pod \"heat-api-9d4c7768f-xlvpp\" (UID: \"3503617f-ad5f-4f7a-b67f-03d8cc42e360\") " pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:46 crc kubenswrapper[5010]: I1126 17:14:46.900293 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4r2bj\" (UniqueName: \"kubernetes.io/projected/a14ed063-d477-4b0d-8d6b-064deba25b74-kube-api-access-4r2bj\") pod \"heat-cfnapi-98fd67cb-2wpwn\" (UID: \"a14ed063-d477-4b0d-8d6b-064deba25b74\") " pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.029519 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.041238 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.323209 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.391620 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data-custom\") pod \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.391831 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-combined-ca-bundle\") pod \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.391946 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rlkp\" (UniqueName: \"kubernetes.io/projected/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-kube-api-access-9rlkp\") pod \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.391988 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data\") pod \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.399070 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" (UID: "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.405275 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-kube-api-access-9rlkp" (OuterVolumeSpecName: "kube-api-access-9rlkp") pod "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" (UID: "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa"). InnerVolumeSpecName "kube-api-access-9rlkp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.433880 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" (UID: "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.495845 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data" (OuterVolumeSpecName: "config-data") pod "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" (UID: "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.496933 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data\") pod \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\" (UID: \"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa\") " Nov 26 17:14:47 crc kubenswrapper[5010]: W1126 17:14:47.497140 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa/volumes/kubernetes.io~secret/config-data Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.497187 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data" (OuterVolumeSpecName: "config-data") pod "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" (UID: "8e2ef10b-5d7e-4a31-8d64-b72fec7373fa"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.498061 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.498081 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.498090 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rlkp\" (UniqueName: \"kubernetes.io/projected/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-kube-api-access-9rlkp\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.498101 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.639274 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-98fd67cb-2wpwn"] Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.670006 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-9d4c7768f-xlvpp"] Nov 26 17:14:47 crc kubenswrapper[5010]: W1126 17:14:47.687872 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda14ed063_d477_4b0d_8d6b_064deba25b74.slice/crio-79f141119e0358fcb8048d69b398f7580f90cd0c6fec7a6f8a5f8da28a9bcc41 WatchSource:0}: Error finding container 79f141119e0358fcb8048d69b398f7580f90cd0c6fec7a6f8a5f8da28a9bcc41: Status 404 returned error can't find the container with id 79f141119e0358fcb8048d69b398f7580f90cd0c6fec7a6f8a5f8da28a9bcc41 Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.725873 5010 generic.go:334] "Generic (PLEG): container finished" podID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerID="3fd448550ec3b36b9971ed895b6279a7fe27bbcc050e960d213d598c94893f5b" exitCode=1 Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.725947 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6c5768474-47jdj" 
event={"ID":"35650a48-d859-4f5e-a8cf-3aaea836183f","Type":"ContainerDied","Data":"3fd448550ec3b36b9971ed895b6279a7fe27bbcc050e960d213d598c94893f5b"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.726667 5010 scope.go:117] "RemoveContainer" containerID="3fd448550ec3b36b9971ed895b6279a7fe27bbcc050e960d213d598c94893f5b" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.733387 5010 generic.go:334] "Generic (PLEG): container finished" podID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" containerID="3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec" exitCode=0 Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.733444 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-856555b5b6-jhzph" event={"ID":"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa","Type":"ContainerDied","Data":"3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.733468 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-856555b5b6-jhzph" event={"ID":"8e2ef10b-5d7e-4a31-8d64-b72fec7373fa","Type":"ContainerDied","Data":"31b53f65d0b6f4c1c49061ee3f0e491972d187186071ea6c60ede646740d0808"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.733517 5010 scope.go:117] "RemoveContainer" containerID="3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.733643 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.740324 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9d4c7768f-xlvpp" event={"ID":"3503617f-ad5f-4f7a-b67f-03d8cc42e360","Type":"ContainerStarted","Data":"a3b4ac5aa7aca37b140e728d842e04b0f598c4f49a15d77e73e7560827f535d0"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.751051 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" event={"ID":"a14ed063-d477-4b0d-8d6b-064deba25b74","Type":"ContainerStarted","Data":"79f141119e0358fcb8048d69b398f7580f90cd0c6fec7a6f8a5f8da28a9bcc41"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.753649 5010 generic.go:334] "Generic (PLEG): container finished" podID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerID="81a1bd823d55c7c4e5b0f78fc863e708505d6795c6d8e431cbd467126027325c" exitCode=1 Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.753739 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" event={"ID":"38f71da4-1f9a-445e-af17-081b174ed1ee","Type":"ContainerDied","Data":"81a1bd823d55c7c4e5b0f78fc863e708505d6795c6d8e431cbd467126027325c"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.754529 5010 scope.go:117] "RemoveContainer" containerID="81a1bd823d55c7c4e5b0f78fc863e708505d6795c6d8e431cbd467126027325c" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.767646 5010 generic.go:334] "Generic (PLEG): container finished" podID="90605dba-0c35-4fa5-b783-859a2d9f0e1f" containerID="ba743c0129581c0ebb93b02da298bfd8bee2ffb16d2112c2ef3ce2ffe6c4133f" exitCode=0 Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.768828 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-677df475ff-fgs6n" event={"ID":"90605dba-0c35-4fa5-b783-859a2d9f0e1f","Type":"ContainerDied","Data":"ba743c0129581c0ebb93b02da298bfd8bee2ffb16d2112c2ef3ce2ffe6c4133f"} Nov 26 17:14:47 crc kubenswrapper[5010]: 
I1126 17:14:47.768872 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-677df475ff-fgs6n" event={"ID":"90605dba-0c35-4fa5-b783-859a2d9f0e1f","Type":"ContainerDied","Data":"60adcb5d8049fc14deb065a35bd2f733e1b6f4e62447a4cbe38fdaf61a5bcff3"} Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.768890 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60adcb5d8049fc14deb065a35bd2f733e1b6f4e62447a4cbe38fdaf61a5bcff3" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.921321 5010 scope.go:117] "RemoveContainer" containerID="3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec" Nov 26 17:14:47 crc kubenswrapper[5010]: E1126 17:14:47.922807 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec\": container with ID starting with 3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec not found: ID does not exist" containerID="3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.922851 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec"} err="failed to get container status \"3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec\": rpc error: code = NotFound desc = could not find container \"3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec\": container with ID starting with 3903ee79efa5842ce681d196369ab9859e5453f3aebfb2550cdef5451d4183ec not found: ID does not exist" Nov 26 17:14:47 crc kubenswrapper[5010]: I1126 17:14:47.965836 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.012332 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data\") pod \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.012500 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-combined-ca-bundle\") pod \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.012580 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4mmvq\" (UniqueName: \"kubernetes.io/projected/90605dba-0c35-4fa5-b783-859a2d9f0e1f-kube-api-access-4mmvq\") pod \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.012684 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data-custom\") pod \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\" (UID: \"90605dba-0c35-4fa5-b783-859a2d9f0e1f\") " Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.062923 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "90605dba-0c35-4fa5-b783-859a2d9f0e1f" (UID: "90605dba-0c35-4fa5-b783-859a2d9f0e1f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.062932 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90605dba-0c35-4fa5-b783-859a2d9f0e1f-kube-api-access-4mmvq" (OuterVolumeSpecName: "kube-api-access-4mmvq") pod "90605dba-0c35-4fa5-b783-859a2d9f0e1f" (UID: "90605dba-0c35-4fa5-b783-859a2d9f0e1f"). InnerVolumeSpecName "kube-api-access-4mmvq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.115462 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4mmvq\" (UniqueName: \"kubernetes.io/projected/90605dba-0c35-4fa5-b783-859a2d9f0e1f-kube-api-access-4mmvq\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.115510 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.212661 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "90605dba-0c35-4fa5-b783-859a2d9f0e1f" (UID: "90605dba-0c35-4fa5-b783-859a2d9f0e1f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.218641 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.227395 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data" (OuterVolumeSpecName: "config-data") pod "90605dba-0c35-4fa5-b783-859a2d9f0e1f" (UID: "90605dba-0c35-4fa5-b783-859a2d9f0e1f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.320635 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/90605dba-0c35-4fa5-b783-859a2d9f0e1f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.780121 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" event={"ID":"a14ed063-d477-4b0d-8d6b-064deba25b74","Type":"ContainerStarted","Data":"0cd1fc71a97213b0b42d7171646b9286a42c43ebe9c032129786b3a7e633a9dd"} Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.780456 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.784321 5010 generic.go:334] "Generic (PLEG): container finished" podID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerID="7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b" exitCode=1 Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.784497 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" event={"ID":"38f71da4-1f9a-445e-af17-081b174ed1ee","Type":"ContainerDied","Data":"7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b"} Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.784559 5010 scope.go:117] "RemoveContainer" containerID="81a1bd823d55c7c4e5b0f78fc863e708505d6795c6d8e431cbd467126027325c" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.785187 5010 scope.go:117] "RemoveContainer" containerID="7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b" Nov 26 17:14:48 crc kubenswrapper[5010]: E1126 17:14:48.785502 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-dcc45f6bf-vtg9c_openstack(38f71da4-1f9a-445e-af17-081b174ed1ee)\"" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.787434 5010 generic.go:334] "Generic (PLEG): container finished" podID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerID="731bbb62ddda444e6336e02fc47c89f7357aead79f6fea293a456e4fd296850f" exitCode=1 Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.787486 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6c5768474-47jdj" event={"ID":"35650a48-d859-4f5e-a8cf-3aaea836183f","Type":"ContainerDied","Data":"731bbb62ddda444e6336e02fc47c89f7357aead79f6fea293a456e4fd296850f"} Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.788171 5010 scope.go:117] "RemoveContainer" 
containerID="731bbb62ddda444e6336e02fc47c89f7357aead79f6fea293a456e4fd296850f" Nov 26 17:14:48 crc kubenswrapper[5010]: E1126 17:14:48.788392 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-6c5768474-47jdj_openstack(35650a48-d859-4f5e-a8cf-3aaea836183f)\"" pod="openstack/heat-api-6c5768474-47jdj" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.792899 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-677df475ff-fgs6n" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.792966 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-9d4c7768f-xlvpp" event={"ID":"3503617f-ad5f-4f7a-b67f-03d8cc42e360","Type":"ContainerStarted","Data":"792482686e0802c4e37099e136880aa3ba9220016a10e46ed60b4dc3f1c883ae"} Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.818298 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" podStartSLOduration=2.818273673 podStartE2EDuration="2.818273673s" podCreationTimestamp="2025-11-26 17:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:48.805726131 +0000 UTC m=+6509.596443299" watchObservedRunningTime="2025-11-26 17:14:48.818273673 +0000 UTC m=+6509.608990831" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.845875 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-9d4c7768f-xlvpp" podStartSLOduration=2.845855549 podStartE2EDuration="2.845855549s" podCreationTimestamp="2025-11-26 17:14:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:14:48.838016824 +0000 UTC m=+6509.628733972" watchObservedRunningTime="2025-11-26 17:14:48.845855549 +0000 UTC m=+6509.636572697" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.898876 5010 scope.go:117] "RemoveContainer" containerID="3fd448550ec3b36b9971ed895b6279a7fe27bbcc050e960d213d598c94893f5b" Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.942406 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-677df475ff-fgs6n"] Nov 26 17:14:48 crc kubenswrapper[5010]: I1126 17:14:48.966357 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-677df475ff-fgs6n"] Nov 26 17:14:49 crc kubenswrapper[5010]: I1126 17:14:49.523164 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:49 crc kubenswrapper[5010]: I1126 17:14:49.810871 5010 scope.go:117] "RemoveContainer" containerID="731bbb62ddda444e6336e02fc47c89f7357aead79f6fea293a456e4fd296850f" Nov 26 17:14:49 crc kubenswrapper[5010]: I1126 17:14:49.811051 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:49 crc kubenswrapper[5010]: E1126 17:14:49.811521 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-6c5768474-47jdj_openstack(35650a48-d859-4f5e-a8cf-3aaea836183f)\"" pod="openstack/heat-api-6c5768474-47jdj" 
podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" Nov 26 17:14:49 crc kubenswrapper[5010]: I1126 17:14:49.812172 5010 scope.go:117] "RemoveContainer" containerID="7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b" Nov 26 17:14:49 crc kubenswrapper[5010]: E1126 17:14:49.812522 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-dcc45f6bf-vtg9c_openstack(38f71da4-1f9a-445e-af17-081b174ed1ee)\"" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" Nov 26 17:14:49 crc kubenswrapper[5010]: I1126 17:14:49.903787 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90605dba-0c35-4fa5-b783-859a2d9f0e1f" path="/var/lib/kubelet/pods/90605dba-0c35-4fa5-b783-859a2d9f0e1f/volumes" Nov 26 17:14:50 crc kubenswrapper[5010]: I1126 17:14:50.705330 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:50 crc kubenswrapper[5010]: I1126 17:14:50.705380 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:50 crc kubenswrapper[5010]: I1126 17:14:50.757457 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:50 crc kubenswrapper[5010]: I1126 17:14:50.757526 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:50 crc kubenswrapper[5010]: I1126 17:14:50.819835 5010 scope.go:117] "RemoveContainer" containerID="731bbb62ddda444e6336e02fc47c89f7357aead79f6fea293a456e4fd296850f" Nov 26 17:14:50 crc kubenswrapper[5010]: E1126 17:14:50.820393 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-6c5768474-47jdj_openstack(35650a48-d859-4f5e-a8cf-3aaea836183f)\"" pod="openstack/heat-api-6c5768474-47jdj" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" Nov 26 17:14:50 crc kubenswrapper[5010]: I1126 17:14:50.824702 5010 scope.go:117] "RemoveContainer" containerID="7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b" Nov 26 17:14:50 crc kubenswrapper[5010]: E1126 17:14:50.825827 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-dcc45f6bf-vtg9c_openstack(38f71da4-1f9a-445e-af17-081b174ed1ee)\"" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" Nov 26 17:14:51 crc kubenswrapper[5010]: I1126 17:14:51.327674 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5f65b9f7c4-6mgjh" Nov 26 17:14:51 crc kubenswrapper[5010]: I1126 17:14:51.410193 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-d8bcc7678-hw72b"] Nov 26 17:14:51 crc kubenswrapper[5010]: I1126 17:14:51.410602 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-d8bcc7678-hw72b" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" containerID="cri-o://423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2" gracePeriod=30 Nov 26 17:14:51 crc kubenswrapper[5010]: I1126 
17:14:51.411943 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-d8bcc7678-hw72b" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon-log" containerID="cri-o://5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1" gracePeriod=30 Nov 26 17:14:51 crc kubenswrapper[5010]: I1126 17:14:51.828455 5010 scope.go:117] "RemoveContainer" containerID="7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b" Nov 26 17:14:51 crc kubenswrapper[5010]: E1126 17:14:51.828901 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-dcc45f6bf-vtg9c_openstack(38f71da4-1f9a-445e-af17-081b174ed1ee)\"" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" Nov 26 17:14:53 crc kubenswrapper[5010]: I1126 17:14:53.891424 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:14:53 crc kubenswrapper[5010]: E1126 17:14:53.893421 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:14:54 crc kubenswrapper[5010]: I1126 17:14:54.876032 5010 generic.go:334] "Generic (PLEG): container finished" podID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerID="423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2" exitCode=0 Nov 26 17:14:54 crc kubenswrapper[5010]: I1126 17:14:54.876074 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8bcc7678-hw72b" event={"ID":"646c3bd8-03a6-43c3-9226-9a68680d20e0","Type":"ContainerDied","Data":"423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2"} Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.337472 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-98fd67cb-2wpwn" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.406389 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-9d4c7768f-xlvpp" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.425206 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-dcc45f6bf-vtg9c"] Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.488125 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6c5768474-47jdj"] Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.934893 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" event={"ID":"38f71da4-1f9a-445e-af17-081b174ed1ee","Type":"ContainerDied","Data":"3d4071f70a6029829a7b3e22840d2deb4e215c339af10462854994159c81072c"} Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.935198 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d4071f70a6029829a7b3e22840d2deb4e215c339af10462854994159c81072c" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.937896 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6c5768474-47jdj" 
event={"ID":"35650a48-d859-4f5e-a8cf-3aaea836183f","Type":"ContainerDied","Data":"f6a833df6599054d993d4e3c0d3ba36c2938dcc0c09274685414e4382d525232"} Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.937955 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f6a833df6599054d993d4e3c0d3ba36c2938dcc0c09274685414e4382d525232" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.952366 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.961377 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.980311 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bfdxl\" (UniqueName: \"kubernetes.io/projected/38f71da4-1f9a-445e-af17-081b174ed1ee-kube-api-access-bfdxl\") pod \"38f71da4-1f9a-445e-af17-081b174ed1ee\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.980406 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data\") pod \"35650a48-d859-4f5e-a8cf-3aaea836183f\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.992665 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:14:58 crc kubenswrapper[5010]: I1126 17:14:58.994169 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38f71da4-1f9a-445e-af17-081b174ed1ee-kube-api-access-bfdxl" (OuterVolumeSpecName: "kube-api-access-bfdxl") pod "38f71da4-1f9a-445e-af17-081b174ed1ee" (UID: "38f71da4-1f9a-445e-af17-081b174ed1ee"). InnerVolumeSpecName "kube-api-access-bfdxl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082017 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8bnt\" (UniqueName: \"kubernetes.io/projected/35650a48-d859-4f5e-a8cf-3aaea836183f-kube-api-access-m8bnt\") pod \"35650a48-d859-4f5e-a8cf-3aaea836183f\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082062 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data-custom\") pod \"38f71da4-1f9a-445e-af17-081b174ed1ee\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082095 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data-custom\") pod \"35650a48-d859-4f5e-a8cf-3aaea836183f\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082153 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-combined-ca-bundle\") pod \"35650a48-d859-4f5e-a8cf-3aaea836183f\" (UID: \"35650a48-d859-4f5e-a8cf-3aaea836183f\") " Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082224 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-combined-ca-bundle\") pod \"38f71da4-1f9a-445e-af17-081b174ed1ee\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082364 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data\") pod \"38f71da4-1f9a-445e-af17-081b174ed1ee\" (UID: \"38f71da4-1f9a-445e-af17-081b174ed1ee\") " Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.082940 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bfdxl\" (UniqueName: \"kubernetes.io/projected/38f71da4-1f9a-445e-af17-081b174ed1ee-kube-api-access-bfdxl\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.087147 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35650a48-d859-4f5e-a8cf-3aaea836183f-kube-api-access-m8bnt" (OuterVolumeSpecName: "kube-api-access-m8bnt") pod "35650a48-d859-4f5e-a8cf-3aaea836183f" (UID: "35650a48-d859-4f5e-a8cf-3aaea836183f"). InnerVolumeSpecName "kube-api-access-m8bnt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.088573 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "38f71da4-1f9a-445e-af17-081b174ed1ee" (UID: "38f71da4-1f9a-445e-af17-081b174ed1ee"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.088686 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "35650a48-d859-4f5e-a8cf-3aaea836183f" (UID: "35650a48-d859-4f5e-a8cf-3aaea836183f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.103694 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data" (OuterVolumeSpecName: "config-data") pod "35650a48-d859-4f5e-a8cf-3aaea836183f" (UID: "35650a48-d859-4f5e-a8cf-3aaea836183f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.112350 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "38f71da4-1f9a-445e-af17-081b174ed1ee" (UID: "38f71da4-1f9a-445e-af17-081b174ed1ee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.117893 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35650a48-d859-4f5e-a8cf-3aaea836183f" (UID: "35650a48-d859-4f5e-a8cf-3aaea836183f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.160065 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data" (OuterVolumeSpecName: "config-data") pod "38f71da4-1f9a-445e-af17-081b174ed1ee" (UID: "38f71da4-1f9a-445e-af17-081b174ed1ee"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.184949 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.184984 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8bnt\" (UniqueName: \"kubernetes.io/projected/35650a48-d859-4f5e-a8cf-3aaea836183f-kube-api-access-m8bnt\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.184998 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.185007 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.185015 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35650a48-d859-4f5e-a8cf-3aaea836183f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.185023 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.185033 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38f71da4-1f9a-445e-af17-081b174ed1ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.950861 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6c5768474-47jdj" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.951863 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-dcc45f6bf-vtg9c" Nov 26 17:14:59 crc kubenswrapper[5010]: I1126 17:14:59.998560 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6c5768474-47jdj"] Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.003563 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-6c5768474-47jdj"] Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.019782 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-dcc45f6bf-vtg9c"] Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.032796 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-dcc45f6bf-vtg9c"] Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.204645 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc"] Nov 26 17:15:00 crc kubenswrapper[5010]: E1126 17:15:00.205566 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.205594 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: E1126 17:15:00.205642 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.205652 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: E1126 17:15:00.205676 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.205682 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: E1126 17:15:00.205767 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.205790 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: E1126 17:15:00.205808 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.205844 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: E1126 17:15:00.205874 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90605dba-0c35-4fa5-b783-859a2d9f0e1f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.205881 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90605dba-0c35-4fa5-b783-859a2d9f0e1f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.206269 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.206298 5010 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="90605dba-0c35-4fa5-b783-859a2d9f0e1f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.206325 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.206341 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.206351 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" containerName="heat-api" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.206359 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" containerName="heat-cfnapi" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.207632 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.210785 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.210820 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.219985 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc"] Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.239592 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8744ecb1-0343-454f-89ba-f7e8e63d40f5-secret-volume\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.239663 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8744ecb1-0343-454f-89ba-f7e8e63d40f5-config-volume\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.239726 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzh9h\" (UniqueName: \"kubernetes.io/projected/8744ecb1-0343-454f-89ba-f7e8e63d40f5-kube-api-access-lzh9h\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.341026 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8744ecb1-0343-454f-89ba-f7e8e63d40f5-secret-volume\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.341082 5010 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8744ecb1-0343-454f-89ba-f7e8e63d40f5-config-volume\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.341115 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzh9h\" (UniqueName: \"kubernetes.io/projected/8744ecb1-0343-454f-89ba-f7e8e63d40f5-kube-api-access-lzh9h\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.342278 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8744ecb1-0343-454f-89ba-f7e8e63d40f5-config-volume\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.345070 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8744ecb1-0343-454f-89ba-f7e8e63d40f5-secret-volume\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.358411 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzh9h\" (UniqueName: \"kubernetes.io/projected/8744ecb1-0343-454f-89ba-f7e8e63d40f5-kube-api-access-lzh9h\") pod \"collect-profiles-29402955-h7kbc\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.483877 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-d8bcc7678-hw72b" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.134:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.134:8443: connect: connection refused" Nov 26 17:15:00 crc kubenswrapper[5010]: I1126 17:15:00.542830 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:01 crc kubenswrapper[5010]: I1126 17:15:01.017110 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc"] Nov 26 17:15:01 crc kubenswrapper[5010]: I1126 17:15:01.904279 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35650a48-d859-4f5e-a8cf-3aaea836183f" path="/var/lib/kubelet/pods/35650a48-d859-4f5e-a8cf-3aaea836183f/volumes" Nov 26 17:15:01 crc kubenswrapper[5010]: I1126 17:15:01.905319 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38f71da4-1f9a-445e-af17-081b174ed1ee" path="/var/lib/kubelet/pods/38f71da4-1f9a-445e-af17-081b174ed1ee/volumes" Nov 26 17:15:01 crc kubenswrapper[5010]: I1126 17:15:01.980838 5010 generic.go:334] "Generic (PLEG): container finished" podID="8744ecb1-0343-454f-89ba-f7e8e63d40f5" containerID="8c318b6a6a889f2bfdd15c17a5148fefa56ba35b2f9bca5380e0587c5fbd723f" exitCode=0 Nov 26 17:15:01 crc kubenswrapper[5010]: I1126 17:15:01.980889 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" event={"ID":"8744ecb1-0343-454f-89ba-f7e8e63d40f5","Type":"ContainerDied","Data":"8c318b6a6a889f2bfdd15c17a5148fefa56ba35b2f9bca5380e0587c5fbd723f"} Nov 26 17:15:01 crc kubenswrapper[5010]: I1126 17:15:01.980922 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" event={"ID":"8744ecb1-0343-454f-89ba-f7e8e63d40f5","Type":"ContainerStarted","Data":"19a04f6ccd632c79186aedc64f00cb9de787c3c1c30feea767e2789510c48292"} Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.404483 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.529800 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8744ecb1-0343-454f-89ba-f7e8e63d40f5-config-volume\") pod \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.530059 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzh9h\" (UniqueName: \"kubernetes.io/projected/8744ecb1-0343-454f-89ba-f7e8e63d40f5-kube-api-access-lzh9h\") pod \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.530153 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8744ecb1-0343-454f-89ba-f7e8e63d40f5-secret-volume\") pod \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\" (UID: \"8744ecb1-0343-454f-89ba-f7e8e63d40f5\") " Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.531353 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8744ecb1-0343-454f-89ba-f7e8e63d40f5-config-volume" (OuterVolumeSpecName: "config-volume") pod "8744ecb1-0343-454f-89ba-f7e8e63d40f5" (UID: "8744ecb1-0343-454f-89ba-f7e8e63d40f5"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.536985 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8744ecb1-0343-454f-89ba-f7e8e63d40f5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8744ecb1-0343-454f-89ba-f7e8e63d40f5" (UID: "8744ecb1-0343-454f-89ba-f7e8e63d40f5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.538027 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8744ecb1-0343-454f-89ba-f7e8e63d40f5-kube-api-access-lzh9h" (OuterVolumeSpecName: "kube-api-access-lzh9h") pod "8744ecb1-0343-454f-89ba-f7e8e63d40f5" (UID: "8744ecb1-0343-454f-89ba-f7e8e63d40f5"). InnerVolumeSpecName "kube-api-access-lzh9h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.633144 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8744ecb1-0343-454f-89ba-f7e8e63d40f5-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.633190 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzh9h\" (UniqueName: \"kubernetes.io/projected/8744ecb1-0343-454f-89ba-f7e8e63d40f5-kube-api-access-lzh9h\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:03 crc kubenswrapper[5010]: I1126 17:15:03.633203 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8744ecb1-0343-454f-89ba-f7e8e63d40f5-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:04 crc kubenswrapper[5010]: I1126 17:15:04.001841 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" event={"ID":"8744ecb1-0343-454f-89ba-f7e8e63d40f5","Type":"ContainerDied","Data":"19a04f6ccd632c79186aedc64f00cb9de787c3c1c30feea767e2789510c48292"} Nov 26 17:15:04 crc kubenswrapper[5010]: I1126 17:15:04.001879 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19a04f6ccd632c79186aedc64f00cb9de787c3c1c30feea767e2789510c48292" Nov 26 17:15:04 crc kubenswrapper[5010]: I1126 17:15:04.001898 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc" Nov 26 17:15:04 crc kubenswrapper[5010]: I1126 17:15:04.487057 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5"] Nov 26 17:15:04 crc kubenswrapper[5010]: I1126 17:15:04.495772 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402910-z2rv5"] Nov 26 17:15:04 crc kubenswrapper[5010]: I1126 17:15:04.891813 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:15:04 crc kubenswrapper[5010]: E1126 17:15:04.892056 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:15:05 crc kubenswrapper[5010]: I1126 17:15:05.722221 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-75d4958f6-ntkst" Nov 26 17:15:05 crc kubenswrapper[5010]: I1126 17:15:05.779212 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-699778bfb5-v4krn"] Nov 26 17:15:05 crc kubenswrapper[5010]: I1126 17:15:05.783995 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-699778bfb5-v4krn" podUID="75a0fd7a-951c-4ebb-baee-445422257b73" containerName="heat-engine" containerID="cri-o://c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" gracePeriod=60 Nov 26 17:15:05 crc kubenswrapper[5010]: I1126 17:15:05.904615 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d320b698-2ff1-407a-9659-75e46bb26aec" path="/var/lib/kubelet/pods/d320b698-2ff1-407a-9659-75e46bb26aec/volumes" Nov 26 17:15:08 crc kubenswrapper[5010]: I1126 17:15:08.750409 5010 scope.go:117] "RemoveContainer" containerID="368465e7c170c2129d8e4403b53f6d879901aa871f87407b0624efcffd21b987" Nov 26 17:15:08 crc kubenswrapper[5010]: I1126 17:15:08.792088 5010 scope.go:117] "RemoveContainer" containerID="10b48c5486e07bdd2361e652cc81d7854e0a6c930b735c4748ef3039b454a796" Nov 26 17:15:08 crc kubenswrapper[5010]: I1126 17:15:08.866377 5010 scope.go:117] "RemoveContainer" containerID="da40ef4ab582018cb44d4316bc04185e043320117689038a8cca8834c267a79d" Nov 26 17:15:08 crc kubenswrapper[5010]: I1126 17:15:08.889671 5010 scope.go:117] "RemoveContainer" containerID="7a1f56a419e244d5d029b5a003119c30a5ba5e4d78f3b4fd489fbb3ef38e92c8" Nov 26 17:15:08 crc kubenswrapper[5010]: E1126 17:15:08.912640 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 17:15:08 crc kubenswrapper[5010]: E1126 17:15:08.914158 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 17:15:08 crc kubenswrapper[5010]: E1126 17:15:08.915544 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"] Nov 26 17:15:08 crc kubenswrapper[5010]: E1126 17:15:08.915604 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-699778bfb5-v4krn" podUID="75a0fd7a-951c-4ebb-baee-445422257b73" containerName="heat-engine" Nov 26 17:15:08 crc kubenswrapper[5010]: I1126 17:15:08.955320 5010 scope.go:117] "RemoveContainer" containerID="d52b166b66babe1c7de3112556ec64b4c42b803e0f1b9cf1a79ba27e870df93e" Nov 26 17:15:10 crc kubenswrapper[5010]: I1126 17:15:10.484129 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-d8bcc7678-hw72b" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.134:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.134:8443: connect: connection refused" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.719949 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.760825 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data\") pod \"75a0fd7a-951c-4ebb-baee-445422257b73\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.761222 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-combined-ca-bundle\") pod \"75a0fd7a-951c-4ebb-baee-445422257b73\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.761292 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nc2q\" (UniqueName: \"kubernetes.io/projected/75a0fd7a-951c-4ebb-baee-445422257b73-kube-api-access-6nc2q\") pod \"75a0fd7a-951c-4ebb-baee-445422257b73\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.761340 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data-custom\") pod \"75a0fd7a-951c-4ebb-baee-445422257b73\" (UID: \"75a0fd7a-951c-4ebb-baee-445422257b73\") " Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.768845 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "75a0fd7a-951c-4ebb-baee-445422257b73" (UID: "75a0fd7a-951c-4ebb-baee-445422257b73"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.768948 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75a0fd7a-951c-4ebb-baee-445422257b73-kube-api-access-6nc2q" (OuterVolumeSpecName: "kube-api-access-6nc2q") pod "75a0fd7a-951c-4ebb-baee-445422257b73" (UID: "75a0fd7a-951c-4ebb-baee-445422257b73"). InnerVolumeSpecName "kube-api-access-6nc2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.798381 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "75a0fd7a-951c-4ebb-baee-445422257b73" (UID: "75a0fd7a-951c-4ebb-baee-445422257b73"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.818871 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data" (OuterVolumeSpecName: "config-data") pod "75a0fd7a-951c-4ebb-baee-445422257b73" (UID: "75a0fd7a-951c-4ebb-baee-445422257b73"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.864452 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.864490 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.864501 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nc2q\" (UniqueName: \"kubernetes.io/projected/75a0fd7a-951c-4ebb-baee-445422257b73-kube-api-access-6nc2q\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:13 crc kubenswrapper[5010]: I1126 17:15:13.864510 5010 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/75a0fd7a-951c-4ebb-baee-445422257b73-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.103822 5010 generic.go:334] "Generic (PLEG): container finished" podID="75a0fd7a-951c-4ebb-baee-445422257b73" containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" exitCode=0 Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.103905 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-699778bfb5-v4krn" event={"ID":"75a0fd7a-951c-4ebb-baee-445422257b73","Type":"ContainerDied","Data":"c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459"} Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.103936 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-699778bfb5-v4krn" event={"ID":"75a0fd7a-951c-4ebb-baee-445422257b73","Type":"ContainerDied","Data":"f6e4efd72ce27593ed221d38ebf28d7cd02466be67cfd1aabee0b7260c04cfe2"} Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.103955 5010 scope.go:117] "RemoveContainer" containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" Nov 26 17:15:14 
crc kubenswrapper[5010]: I1126 17:15:14.104094 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-699778bfb5-v4krn" Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.127485 5010 scope.go:117] "RemoveContainer" containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" Nov 26 17:15:14 crc kubenswrapper[5010]: E1126 17:15:14.128038 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459\": container with ID starting with c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459 not found: ID does not exist" containerID="c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459" Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.128078 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459"} err="failed to get container status \"c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459\": rpc error: code = NotFound desc = could not find container \"c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459\": container with ID starting with c8168affce7e3aa27c47911891f125ac87648c1fd58f908606b2c703138e4459 not found: ID does not exist" Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.129513 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-699778bfb5-v4krn"] Nov 26 17:15:14 crc kubenswrapper[5010]: I1126 17:15:14.138360 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-699778bfb5-v4krn"] Nov 26 17:15:15 crc kubenswrapper[5010]: I1126 17:15:15.905912 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="75a0fd7a-951c-4ebb-baee-445422257b73" path="/var/lib/kubelet/pods/75a0fd7a-951c-4ebb-baee-445422257b73/volumes" Nov 26 17:15:17 crc kubenswrapper[5010]: I1126 17:15:17.888497 5010 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod8e2ef10b-5d7e-4a31-8d64-b72fec7373fa"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod8e2ef10b-5d7e-4a31-8d64-b72fec7373fa] : Timed out while waiting for systemd to remove kubepods-besteffort-pod8e2ef10b_5d7e_4a31_8d64_b72fec7373fa.slice" Nov 26 17:15:17 crc kubenswrapper[5010]: E1126 17:15:17.889021 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod8e2ef10b-5d7e-4a31-8d64-b72fec7373fa] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod8e2ef10b-5d7e-4a31-8d64-b72fec7373fa] : Timed out while waiting for systemd to remove kubepods-besteffort-pod8e2ef10b_5d7e_4a31_8d64_b72fec7373fa.slice" pod="openstack/heat-cfnapi-856555b5b6-jhzph" podUID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" Nov 26 17:15:17 crc kubenswrapper[5010]: I1126 17:15:17.891363 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:15:17 crc kubenswrapper[5010]: E1126 17:15:17.891603 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:15:18 crc kubenswrapper[5010]: I1126 17:15:18.158838 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-856555b5b6-jhzph" Nov 26 17:15:18 crc kubenswrapper[5010]: I1126 17:15:18.194995 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-856555b5b6-jhzph"] Nov 26 17:15:18 crc kubenswrapper[5010]: I1126 17:15:18.204603 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-856555b5b6-jhzph"] Nov 26 17:15:19 crc kubenswrapper[5010]: I1126 17:15:19.913656 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e2ef10b-5d7e-4a31-8d64-b72fec7373fa" path="/var/lib/kubelet/pods/8e2ef10b-5d7e-4a31-8d64-b72fec7373fa/volumes" Nov 26 17:15:20 crc kubenswrapper[5010]: I1126 17:15:20.485265 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-d8bcc7678-hw72b" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" probeResult="failure" output="Get \"https://10.217.1.134:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.1.134:8443: connect: connection refused" Nov 26 17:15:20 crc kubenswrapper[5010]: I1126 17:15:20.486008 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.805822 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.945402 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-secret-key\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.945439 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-scripts\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.945563 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-config-data\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.945609 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-combined-ca-bundle\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.945652 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72h2c\" (UniqueName: \"kubernetes.io/projected/646c3bd8-03a6-43c3-9226-9a68680d20e0-kube-api-access-72h2c\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.945804 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/646c3bd8-03a6-43c3-9226-9a68680d20e0-logs\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.946018 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-tls-certs\") pod \"646c3bd8-03a6-43c3-9226-9a68680d20e0\" (UID: \"646c3bd8-03a6-43c3-9226-9a68680d20e0\") " Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.947159 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/646c3bd8-03a6-43c3-9226-9a68680d20e0-logs" (OuterVolumeSpecName: "logs") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.951437 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/646c3bd8-03a6-43c3-9226-9a68680d20e0-kube-api-access-72h2c" (OuterVolumeSpecName: "kube-api-access-72h2c") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "kube-api-access-72h2c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.952680 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.979635 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-scripts" (OuterVolumeSpecName: "scripts") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.979756 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-config-data" (OuterVolumeSpecName: "config-data") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:15:21 crc kubenswrapper[5010]: I1126 17:15:21.980503 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.031316 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "646c3bd8-03a6-43c3-9226-9a68680d20e0" (UID: "646c3bd8-03a6-43c3-9226-9a68680d20e0"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048299 5010 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048331 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048340 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/646c3bd8-03a6-43c3-9226-9a68680d20e0-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048349 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048359 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72h2c\" (UniqueName: \"kubernetes.io/projected/646c3bd8-03a6-43c3-9226-9a68680d20e0-kube-api-access-72h2c\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048368 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/646c3bd8-03a6-43c3-9226-9a68680d20e0-logs\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.048377 5010 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/646c3bd8-03a6-43c3-9226-9a68680d20e0-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.209857 5010 generic.go:334] "Generic (PLEG): container finished" podID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerID="5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1" exitCode=137 Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.209900 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8bcc7678-hw72b" event={"ID":"646c3bd8-03a6-43c3-9226-9a68680d20e0","Type":"ContainerDied","Data":"5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1"} Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.209910 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-d8bcc7678-hw72b" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.209925 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-d8bcc7678-hw72b" event={"ID":"646c3bd8-03a6-43c3-9226-9a68680d20e0","Type":"ContainerDied","Data":"2a9e9f956345f04e6bf386f1505739f0788f6f0eb103b65024d34f0d5e4f12b2"} Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.209943 5010 scope.go:117] "RemoveContainer" containerID="423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.260854 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-d8bcc7678-hw72b"] Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.269772 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-d8bcc7678-hw72b"] Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.405610 5010 scope.go:117] "RemoveContainer" containerID="5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.440028 5010 scope.go:117] "RemoveContainer" containerID="423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2" Nov 26 17:15:22 crc kubenswrapper[5010]: E1126 17:15:22.440629 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2\": container with ID starting with 423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2 not found: ID does not exist" containerID="423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.440682 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2"} err="failed to get container status \"423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2\": rpc error: code = NotFound desc = could not find container \"423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2\": container with ID starting with 423a6108f11feab2913e832126cdd4ffe21ba3fe691c096e91d157b32e0dd9f2 not found: ID does not exist" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.440772 5010 scope.go:117] "RemoveContainer" containerID="5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1" Nov 26 17:15:22 crc kubenswrapper[5010]: E1126 17:15:22.441258 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1\": container with ID starting with 5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1 not found: ID does not exist" containerID="5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1" Nov 26 17:15:22 crc kubenswrapper[5010]: I1126 17:15:22.441302 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1"} err="failed to get container status \"5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1\": rpc error: code = NotFound desc = could not find container \"5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1\": container with ID starting with 5718fce15c9648b72ad3c04538bbcad255bf431a49c70236078b293b7282a1a1 not found: ID does not exist" Nov 26 17:15:23 crc 
kubenswrapper[5010]: I1126 17:15:23.916224 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" path="/var/lib/kubelet/pods/646c3bd8-03a6-43c3-9226-9a68680d20e0/volumes" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.331329 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh"] Nov 26 17:15:28 crc kubenswrapper[5010]: E1126 17:15:28.332793 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8744ecb1-0343-454f-89ba-f7e8e63d40f5" containerName="collect-profiles" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.332818 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8744ecb1-0343-454f-89ba-f7e8e63d40f5" containerName="collect-profiles" Nov 26 17:15:28 crc kubenswrapper[5010]: E1126 17:15:28.332859 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75a0fd7a-951c-4ebb-baee-445422257b73" containerName="heat-engine" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.332872 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="75a0fd7a-951c-4ebb-baee-445422257b73" containerName="heat-engine" Nov 26 17:15:28 crc kubenswrapper[5010]: E1126 17:15:28.332920 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon-log" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.332932 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon-log" Nov 26 17:15:28 crc kubenswrapper[5010]: E1126 17:15:28.332973 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.332985 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.333405 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="75a0fd7a-951c-4ebb-baee-445422257b73" containerName="heat-engine" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.333457 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon-log" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.333500 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="646c3bd8-03a6-43c3-9226-9a68680d20e0" containerName="horizon" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.333548 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8744ecb1-0343-454f-89ba-f7e8e63d40f5" containerName="collect-profiles" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.337354 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.340210 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.350260 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh"] Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.506460 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.507173 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5z7p\" (UniqueName: \"kubernetes.io/projected/0b5519ad-45e2-4fef-b960-6090a4d87d70-kube-api-access-q5z7p\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.507549 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.609085 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.609192 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.609286 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5z7p\" (UniqueName: \"kubernetes.io/projected/0b5519ad-45e2-4fef-b960-6090a4d87d70-kube-api-access-q5z7p\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.609546 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.609602 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.629373 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5z7p\" (UniqueName: \"kubernetes.io/projected/0b5519ad-45e2-4fef-b960-6090a4d87d70-kube-api-access-q5z7p\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:28 crc kubenswrapper[5010]: I1126 17:15:28.679038 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:29 crc kubenswrapper[5010]: I1126 17:15:29.145944 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh"] Nov 26 17:15:29 crc kubenswrapper[5010]: I1126 17:15:29.282829 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" event={"ID":"0b5519ad-45e2-4fef-b960-6090a4d87d70","Type":"ContainerStarted","Data":"0d67247ed42a7ae727e1d3c0373bfa215168019b677db4dd7e01c62124adfcca"} Nov 26 17:15:29 crc kubenswrapper[5010]: I1126 17:15:29.905698 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:15:29 crc kubenswrapper[5010]: E1126 17:15:29.906274 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:15:30 crc kubenswrapper[5010]: I1126 17:15:30.301076 5010 generic.go:334] "Generic (PLEG): container finished" podID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerID="b2c05736621e789a19e44f084220bdf116aa2e44477af287d44feb824bbb828c" exitCode=0 Nov 26 17:15:30 crc kubenswrapper[5010]: I1126 17:15:30.301117 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" event={"ID":"0b5519ad-45e2-4fef-b960-6090a4d87d70","Type":"ContainerDied","Data":"b2c05736621e789a19e44f084220bdf116aa2e44477af287d44feb824bbb828c"} Nov 26 17:15:30 crc kubenswrapper[5010]: I1126 17:15:30.304470 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:15:33 crc kubenswrapper[5010]: I1126 17:15:33.333848 5010 generic.go:334] "Generic 
(PLEG): container finished" podID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerID="8d852c74db33a0d3d201f71a6397c464f379b59f060e5a4f32df0e23293088e9" exitCode=0 Nov 26 17:15:33 crc kubenswrapper[5010]: I1126 17:15:33.333903 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" event={"ID":"0b5519ad-45e2-4fef-b960-6090a4d87d70","Type":"ContainerDied","Data":"8d852c74db33a0d3d201f71a6397c464f379b59f060e5a4f32df0e23293088e9"} Nov 26 17:15:34 crc kubenswrapper[5010]: I1126 17:15:34.354583 5010 generic.go:334] "Generic (PLEG): container finished" podID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerID="2ac8cb89c1881116ef227c12fbcb1442a981dbdb25dea784c87caa7004235eac" exitCode=0 Nov 26 17:15:34 crc kubenswrapper[5010]: I1126 17:15:34.354670 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" event={"ID":"0b5519ad-45e2-4fef-b960-6090a4d87d70","Type":"ContainerDied","Data":"2ac8cb89c1881116ef227c12fbcb1442a981dbdb25dea784c87caa7004235eac"} Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.018156 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8qlmg"] Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.021282 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.031584 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8qlmg"] Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.165789 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-utilities\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.166024 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m89x9\" (UniqueName: \"kubernetes.io/projected/9f46a5c3-1928-4904-8122-6a698f5b7b4a-kube-api-access-m89x9\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.166119 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-catalog-content\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.267691 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-catalog-content\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.267825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-utilities\") pod 
\"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.267935 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m89x9\" (UniqueName: \"kubernetes.io/projected/9f46a5c3-1928-4904-8122-6a698f5b7b4a-kube-api-access-m89x9\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.268225 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-catalog-content\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.268363 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-utilities\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.306594 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m89x9\" (UniqueName: \"kubernetes.io/projected/9f46a5c3-1928-4904-8122-6a698f5b7b4a-kube-api-access-m89x9\") pod \"redhat-operators-8qlmg\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.348877 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.838773 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:35 crc kubenswrapper[5010]: W1126 17:15:35.865284 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9f46a5c3_1928_4904_8122_6a698f5b7b4a.slice/crio-bb840a69f256e071ee1a93826488f964598b43c593e0e285a26077a99c570a8a WatchSource:0}: Error finding container bb840a69f256e071ee1a93826488f964598b43c593e0e285a26077a99c570a8a: Status 404 returned error can't find the container with id bb840a69f256e071ee1a93826488f964598b43c593e0e285a26077a99c570a8a Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.865670 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8qlmg"] Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.986034 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5z7p\" (UniqueName: \"kubernetes.io/projected/0b5519ad-45e2-4fef-b960-6090a4d87d70-kube-api-access-q5z7p\") pod \"0b5519ad-45e2-4fef-b960-6090a4d87d70\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.986103 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-bundle\") pod \"0b5519ad-45e2-4fef-b960-6090a4d87d70\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.986140 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-util\") pod \"0b5519ad-45e2-4fef-b960-6090a4d87d70\" (UID: \"0b5519ad-45e2-4fef-b960-6090a4d87d70\") " Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.988168 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-bundle" (OuterVolumeSpecName: "bundle") pod "0b5519ad-45e2-4fef-b960-6090a4d87d70" (UID: "0b5519ad-45e2-4fef-b960-6090a4d87d70"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:15:35 crc kubenswrapper[5010]: I1126 17:15:35.998515 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-util" (OuterVolumeSpecName: "util") pod "0b5519ad-45e2-4fef-b960-6090a4d87d70" (UID: "0b5519ad-45e2-4fef-b960-6090a4d87d70"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.036036 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b5519ad-45e2-4fef-b960-6090a4d87d70-kube-api-access-q5z7p" (OuterVolumeSpecName: "kube-api-access-q5z7p") pod "0b5519ad-45e2-4fef-b960-6090a4d87d70" (UID: "0b5519ad-45e2-4fef-b960-6090a4d87d70"). InnerVolumeSpecName "kube-api-access-q5z7p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.090230 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5z7p\" (UniqueName: \"kubernetes.io/projected/0b5519ad-45e2-4fef-b960-6090a4d87d70-kube-api-access-q5z7p\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.090274 5010 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.090286 5010 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0b5519ad-45e2-4fef-b960-6090a4d87d70-util\") on node \"crc\" DevicePath \"\"" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.373167 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" event={"ID":"0b5519ad-45e2-4fef-b960-6090a4d87d70","Type":"ContainerDied","Data":"0d67247ed42a7ae727e1d3c0373bfa215168019b677db4dd7e01c62124adfcca"} Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.373339 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d67247ed42a7ae727e1d3c0373bfa215168019b677db4dd7e01c62124adfcca" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.373446 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh" Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.375369 5010 generic.go:334] "Generic (PLEG): container finished" podID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerID="78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950" exitCode=0 Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.375409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerDied","Data":"78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950"} Nov 26 17:15:36 crc kubenswrapper[5010]: I1126 17:15:36.375434 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerStarted","Data":"bb840a69f256e071ee1a93826488f964598b43c593e0e285a26077a99c570a8a"} Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.388543 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerStarted","Data":"b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e"} Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.432490 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cdxj2"] Nov 26 17:15:37 crc kubenswrapper[5010]: E1126 17:15:37.433127 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="pull" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.433149 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="pull" Nov 26 17:15:37 crc kubenswrapper[5010]: E1126 17:15:37.433180 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="extract" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.433244 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="extract" Nov 26 17:15:37 crc kubenswrapper[5010]: E1126 17:15:37.433261 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="util" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.433270 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="util" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.433943 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b5519ad-45e2-4fef-b960-6090a4d87d70" containerName="extract" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.436086 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.452820 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cdxj2"] Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.521931 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t847q\" (UniqueName: \"kubernetes.io/projected/991b800c-a342-4ff2-b6ca-e66eac243ae5-kube-api-access-t847q\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.522006 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-utilities\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.522090 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-catalog-content\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.623793 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t847q\" (UniqueName: \"kubernetes.io/projected/991b800c-a342-4ff2-b6ca-e66eac243ae5-kube-api-access-t847q\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.623849 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-utilities\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.623922 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-catalog-content\") pod \"community-operators-cdxj2\" (UID: 
\"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.624388 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-catalog-content\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.624589 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-utilities\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.651854 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t847q\" (UniqueName: \"kubernetes.io/projected/991b800c-a342-4ff2-b6ca-e66eac243ae5-kube-api-access-t847q\") pod \"community-operators-cdxj2\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:37 crc kubenswrapper[5010]: I1126 17:15:37.756896 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:38 crc kubenswrapper[5010]: I1126 17:15:38.497288 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cdxj2"] Nov 26 17:15:39 crc kubenswrapper[5010]: I1126 17:15:39.456015 5010 generic.go:334] "Generic (PLEG): container finished" podID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerID="b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e" exitCode=0 Nov 26 17:15:39 crc kubenswrapper[5010]: I1126 17:15:39.456400 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerDied","Data":"b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e"} Nov 26 17:15:39 crc kubenswrapper[5010]: I1126 17:15:39.460271 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerStarted","Data":"e3af9289ac145a4bcefbd635dc9b8573d3263d06e2c44fa38376b1c3970e74f0"} Nov 26 17:15:39 crc kubenswrapper[5010]: I1126 17:15:39.460313 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerStarted","Data":"b2294a251a32e6096ea73b7dcd02c26a5de81f0909898e199457d9bef0642e33"} Nov 26 17:15:40 crc kubenswrapper[5010]: I1126 17:15:40.517833 5010 generic.go:334] "Generic (PLEG): container finished" podID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerID="e3af9289ac145a4bcefbd635dc9b8573d3263d06e2c44fa38376b1c3970e74f0" exitCode=0 Nov 26 17:15:40 crc kubenswrapper[5010]: I1126 17:15:40.518109 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerDied","Data":"e3af9289ac145a4bcefbd635dc9b8573d3263d06e2c44fa38376b1c3970e74f0"} Nov 26 17:15:41 crc kubenswrapper[5010]: I1126 17:15:41.554917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerStarted","Data":"8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b"} Nov 26 17:15:41 crc kubenswrapper[5010]: I1126 17:15:41.609181 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8qlmg" podStartSLOduration=3.663723747 podStartE2EDuration="7.609161058s" podCreationTimestamp="2025-11-26 17:15:34 +0000 UTC" firstStartedPulling="2025-11-26 17:15:36.37726305 +0000 UTC m=+6557.167980198" lastFinishedPulling="2025-11-26 17:15:40.322700361 +0000 UTC m=+6561.113417509" observedRunningTime="2025-11-26 17:15:41.604886432 +0000 UTC m=+6562.395603580" watchObservedRunningTime="2025-11-26 17:15:41.609161058 +0000 UTC m=+6562.399878206" Nov 26 17:15:42 crc kubenswrapper[5010]: I1126 17:15:42.566924 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerStarted","Data":"bec7e5fa6d421f4a353b8d3351c007760199b70a8a6275029c829cec64c9ec15"} Nov 26 17:15:44 crc kubenswrapper[5010]: I1126 17:15:44.616782 5010 generic.go:334] "Generic (PLEG): container finished" podID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerID="bec7e5fa6d421f4a353b8d3351c007760199b70a8a6275029c829cec64c9ec15" exitCode=0 Nov 26 17:15:44 crc kubenswrapper[5010]: I1126 17:15:44.618878 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerDied","Data":"bec7e5fa6d421f4a353b8d3351c007760199b70a8a6275029c829cec64c9ec15"} Nov 26 17:15:44 crc kubenswrapper[5010]: I1126 17:15:44.892214 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:15:44 crc kubenswrapper[5010]: E1126 17:15:44.892572 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:15:45 crc kubenswrapper[5010]: I1126 17:15:45.349396 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:45 crc kubenswrapper[5010]: I1126 17:15:45.350683 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:15:45 crc kubenswrapper[5010]: I1126 17:15:45.630869 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerStarted","Data":"c235e1dc0a1dd8a34052491d6cd4cfca3c2f52182fbe921dc1d87a2e2fbef483"} Nov 26 17:15:45 crc kubenswrapper[5010]: I1126 17:15:45.650635 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cdxj2" podStartSLOduration=3.770543606 podStartE2EDuration="8.650619187s" podCreationTimestamp="2025-11-26 17:15:37 +0000 UTC" firstStartedPulling="2025-11-26 17:15:40.520084028 +0000 UTC m=+6561.310801176" lastFinishedPulling="2025-11-26 17:15:45.400159599 +0000 UTC 
m=+6566.190876757" observedRunningTime="2025-11-26 17:15:45.646670008 +0000 UTC m=+6566.437387156" watchObservedRunningTime="2025-11-26 17:15:45.650619187 +0000 UTC m=+6566.441336335" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.297850 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.299963 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.304084 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.304397 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-qdxdb" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.304543 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.313889 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdbhk\" (UniqueName: \"kubernetes.io/projected/f4f66357-4d7b-4f37-a905-c26b934dfcf7-kube-api-access-xdbhk\") pod \"obo-prometheus-operator-668cf9dfbb-f8k45\" (UID: \"f4f66357-4d7b-4f37-a905-c26b934dfcf7\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.315939 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.322030 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.326265 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.326449 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-xjpb2" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.349998 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.351460 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.371435 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.397777 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.407935 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8qlmg" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" probeResult="failure" output=< Nov 26 17:15:46 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:15:46 crc kubenswrapper[5010]: > Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.411342 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.418705 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdbhk\" (UniqueName: \"kubernetes.io/projected/f4f66357-4d7b-4f37-a905-c26b934dfcf7-kube-api-access-xdbhk\") pod \"obo-prometheus-operator-668cf9dfbb-f8k45\" (UID: \"f4f66357-4d7b-4f37-a905-c26b934dfcf7\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.455500 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdbhk\" (UniqueName: \"kubernetes.io/projected/f4f66357-4d7b-4f37-a905-c26b934dfcf7-kube-api-access-xdbhk\") pod \"obo-prometheus-operator-668cf9dfbb-f8k45\" (UID: \"f4f66357-4d7b-4f37-a905-c26b934dfcf7\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.520398 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2caad199-2ff2-4de0-bdfd-118c2384c891-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn\" (UID: \"2caad199-2ff2-4de0-bdfd-118c2384c891\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.520766 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/01afdb7b-0479-43db-959f-431508c4f71e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d\" (UID: \"01afdb7b-0479-43db-959f-431508c4f71e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.520894 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/01afdb7b-0479-43db-959f-431508c4f71e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d\" (UID: \"01afdb7b-0479-43db-959f-431508c4f71e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.520925 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2caad199-2ff2-4de0-bdfd-118c2384c891-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn\" (UID: \"2caad199-2ff2-4de0-bdfd-118c2384c891\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.525788 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l5mtz"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.531269 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.548108 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l5mtz"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.555092 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-k6mnb" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.555965 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.618218 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.625598 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9qfh\" (UniqueName: \"kubernetes.io/projected/30f18a72-40dd-49af-a43d-208554ff5d05-kube-api-access-x9qfh\") pod \"observability-operator-d8bb48f5d-l5mtz\" (UID: \"30f18a72-40dd-49af-a43d-208554ff5d05\") " pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.625650 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2caad199-2ff2-4de0-bdfd-118c2384c891-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn\" (UID: \"2caad199-2ff2-4de0-bdfd-118c2384c891\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.625700 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/30f18a72-40dd-49af-a43d-208554ff5d05-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l5mtz\" (UID: \"30f18a72-40dd-49af-a43d-208554ff5d05\") " pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.625765 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/01afdb7b-0479-43db-959f-431508c4f71e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d\" (UID: \"01afdb7b-0479-43db-959f-431508c4f71e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.625811 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/01afdb7b-0479-43db-959f-431508c4f71e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d\" (UID: \"01afdb7b-0479-43db-959f-431508c4f71e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.625831 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2caad199-2ff2-4de0-bdfd-118c2384c891-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn\" (UID: \"2caad199-2ff2-4de0-bdfd-118c2384c891\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.633459 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2caad199-2ff2-4de0-bdfd-118c2384c891-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn\" (UID: \"2caad199-2ff2-4de0-bdfd-118c2384c891\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.633855 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2caad199-2ff2-4de0-bdfd-118c2384c891-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn\" (UID: \"2caad199-2ff2-4de0-bdfd-118c2384c891\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.640574 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/01afdb7b-0479-43db-959f-431508c4f71e-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d\" (UID: \"01afdb7b-0479-43db-959f-431508c4f71e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.640927 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/01afdb7b-0479-43db-959f-431508c4f71e-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d\" (UID: \"01afdb7b-0479-43db-959f-431508c4f71e\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.667207 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.731387 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/30f18a72-40dd-49af-a43d-208554ff5d05-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l5mtz\" (UID: \"30f18a72-40dd-49af-a43d-208554ff5d05\") " pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.731670 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9qfh\" (UniqueName: \"kubernetes.io/projected/30f18a72-40dd-49af-a43d-208554ff5d05-kube-api-access-x9qfh\") pod \"observability-operator-d8bb48f5d-l5mtz\" (UID: \"30f18a72-40dd-49af-a43d-208554ff5d05\") " pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.731408 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-2dx82"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.733464 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.736512 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/30f18a72-40dd-49af-a43d-208554ff5d05-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-l5mtz\" (UID: \"30f18a72-40dd-49af-a43d-208554ff5d05\") " pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.738638 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-4f27h" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.774083 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9qfh\" (UniqueName: \"kubernetes.io/projected/30f18a72-40dd-49af-a43d-208554ff5d05-kube-api-access-x9qfh\") pod \"observability-operator-d8bb48f5d-l5mtz\" (UID: \"30f18a72-40dd-49af-a43d-208554ff5d05\") " pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.795860 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-2dx82"] Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.862382 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.939019 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/85358935-d7cf-4109-8bea-451aa3150b5c-openshift-service-ca\") pod \"perses-operator-5446b9c989-2dx82\" (UID: \"85358935-d7cf-4109-8bea-451aa3150b5c\") " pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.939604 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" Nov 26 17:15:46 crc kubenswrapper[5010]: I1126 17:15:46.943586 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4p9m\" (UniqueName: \"kubernetes.io/projected/85358935-d7cf-4109-8bea-451aa3150b5c-kube-api-access-f4p9m\") pod \"perses-operator-5446b9c989-2dx82\" (UID: \"85358935-d7cf-4109-8bea-451aa3150b5c\") " pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.048666 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/85358935-d7cf-4109-8bea-451aa3150b5c-openshift-service-ca\") pod \"perses-operator-5446b9c989-2dx82\" (UID: \"85358935-d7cf-4109-8bea-451aa3150b5c\") " pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.048770 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4p9m\" (UniqueName: \"kubernetes.io/projected/85358935-d7cf-4109-8bea-451aa3150b5c-kube-api-access-f4p9m\") pod \"perses-operator-5446b9c989-2dx82\" (UID: \"85358935-d7cf-4109-8bea-451aa3150b5c\") " pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.049963 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/85358935-d7cf-4109-8bea-451aa3150b5c-openshift-service-ca\") pod \"perses-operator-5446b9c989-2dx82\" (UID: \"85358935-d7cf-4109-8bea-451aa3150b5c\") " pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.072510 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4p9m\" (UniqueName: \"kubernetes.io/projected/85358935-d7cf-4109-8bea-451aa3150b5c-kube-api-access-f4p9m\") pod \"perses-operator-5446b9c989-2dx82\" (UID: \"85358935-d7cf-4109-8bea-451aa3150b5c\") " pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.189634 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.280600 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45"] Nov 26 17:15:47 crc kubenswrapper[5010]: W1126 17:15:47.281656 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4f66357_4d7b_4f37_a905_c26b934dfcf7.slice/crio-41e48caa96204698e76b0aa6fe49957579fe0b36441395c9f81db4028aa74e2f WatchSource:0}: Error finding container 41e48caa96204698e76b0aa6fe49957579fe0b36441395c9f81db4028aa74e2f: Status 404 returned error can't find the container with id 41e48caa96204698e76b0aa6fe49957579fe0b36441395c9f81db4028aa74e2f Nov 26 17:15:47 crc kubenswrapper[5010]: W1126 17:15:47.429325 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2caad199_2ff2_4de0_bdfd_118c2384c891.slice/crio-09e2cffe8b46f92530bcec168e3840a09dc9eec4fd0167a13472f82b5a788eff WatchSource:0}: Error finding container 09e2cffe8b46f92530bcec168e3840a09dc9eec4fd0167a13472f82b5a788eff: Status 404 returned error can't find the container with id 09e2cffe8b46f92530bcec168e3840a09dc9eec4fd0167a13472f82b5a788eff Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.438926 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn"] Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.460853 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-l5mtz"] Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.583536 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d"] Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.684983 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" event={"ID":"f4f66357-4d7b-4f37-a905-c26b934dfcf7","Type":"ContainerStarted","Data":"41e48caa96204698e76b0aa6fe49957579fe0b36441395c9f81db4028aa74e2f"} Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.689218 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" event={"ID":"01afdb7b-0479-43db-959f-431508c4f71e","Type":"ContainerStarted","Data":"6bb8af5cac889439cc1037802cc780985fc064118a430a143809b97b0146dcb4"} Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.690730 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" event={"ID":"2caad199-2ff2-4de0-bdfd-118c2384c891","Type":"ContainerStarted","Data":"09e2cffe8b46f92530bcec168e3840a09dc9eec4fd0167a13472f82b5a788eff"} Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.692055 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" event={"ID":"30f18a72-40dd-49af-a43d-208554ff5d05","Type":"ContainerStarted","Data":"e0b9adbe5e6b12a8aac5470255ede4532bf7f40b4b65831a7439d0fd2101a364"} Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.757000 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:47 crc 
kubenswrapper[5010]: I1126 17:15:47.757096 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:15:47 crc kubenswrapper[5010]: I1126 17:15:47.836408 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-2dx82"] Nov 26 17:15:47 crc kubenswrapper[5010]: W1126 17:15:47.861410 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85358935_d7cf_4109_8bea_451aa3150b5c.slice/crio-dc7d5efef482b3ddd248c298128952ac21d0a51a16b719cd7d296253070c9861 WatchSource:0}: Error finding container dc7d5efef482b3ddd248c298128952ac21d0a51a16b719cd7d296253070c9861: Status 404 returned error can't find the container with id dc7d5efef482b3ddd248c298128952ac21d0a51a16b719cd7d296253070c9861 Nov 26 17:15:48 crc kubenswrapper[5010]: I1126 17:15:48.706613 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-2dx82" event={"ID":"85358935-d7cf-4109-8bea-451aa3150b5c","Type":"ContainerStarted","Data":"dc7d5efef482b3ddd248c298128952ac21d0a51a16b719cd7d296253070c9861"} Nov 26 17:15:48 crc kubenswrapper[5010]: I1126 17:15:48.828593 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cdxj2" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="registry-server" probeResult="failure" output=< Nov 26 17:15:48 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:15:48 crc kubenswrapper[5010]: > Nov 26 17:15:56 crc kubenswrapper[5010]: I1126 17:15:56.422390 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8qlmg" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" probeResult="failure" output=< Nov 26 17:15:56 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:15:56 crc kubenswrapper[5010]: > Nov 26 17:15:56 crc kubenswrapper[5010]: I1126 17:15:56.891673 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:15:56 crc kubenswrapper[5010]: E1126 17:15:56.892143 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.804663 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" event={"ID":"30f18a72-40dd-49af-a43d-208554ff5d05","Type":"ContainerStarted","Data":"04d5485f73bfd7b46378f7b26c1d89b5fb666f5b6a70115d98f57e71f753241a"} Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.805292 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.810739 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.822652 5010 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" event={"ID":"f4f66357-4d7b-4f37-a905-c26b934dfcf7","Type":"ContainerStarted","Data":"cf818d9beb4acaf8a74db11f12e42e985e36073815cf1b05dd61ad0114b74d28"} Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.826264 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" event={"ID":"01afdb7b-0479-43db-959f-431508c4f71e","Type":"ContainerStarted","Data":"040a495cf3fdc53858f80aaace20920c240a0f811c8fa7457a1c6cfc97baee63"} Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.833618 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" event={"ID":"2caad199-2ff2-4de0-bdfd-118c2384c891","Type":"ContainerStarted","Data":"e089f9a2223032dba3d8ea65f161be485e2dc473ffe6c32147092629ab19516f"} Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.833907 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-l5mtz" podStartSLOduration=2.23845326 podStartE2EDuration="11.833884455s" podCreationTimestamp="2025-11-26 17:15:46 +0000 UTC" firstStartedPulling="2025-11-26 17:15:47.493410206 +0000 UTC m=+6568.284127354" lastFinishedPulling="2025-11-26 17:15:57.088841401 +0000 UTC m=+6577.879558549" observedRunningTime="2025-11-26 17:15:57.829313722 +0000 UTC m=+6578.620030870" watchObservedRunningTime="2025-11-26 17:15:57.833884455 +0000 UTC m=+6578.624601603" Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.838727 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-2dx82" event={"ID":"85358935-d7cf-4109-8bea-451aa3150b5c","Type":"ContainerStarted","Data":"46df9647bfcf2397a7b5bf1c3b8a29e7a70d4e395d60e2ff29c7f4e09812a0c9"} Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.839697 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.859760 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d" podStartSLOduration=2.488172129 podStartE2EDuration="11.859741408s" podCreationTimestamp="2025-11-26 17:15:46 +0000 UTC" firstStartedPulling="2025-11-26 17:15:47.585517596 +0000 UTC m=+6568.376234754" lastFinishedPulling="2025-11-26 17:15:56.957086885 +0000 UTC m=+6577.747804033" observedRunningTime="2025-11-26 17:15:57.853256247 +0000 UTC m=+6578.643973395" watchObservedRunningTime="2025-11-26 17:15:57.859741408 +0000 UTC m=+6578.650458576" Nov 26 17:15:57 crc kubenswrapper[5010]: I1126 17:15:57.923328 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-f8k45" podStartSLOduration=2.250544481 podStartE2EDuration="11.923304439s" podCreationTimestamp="2025-11-26 17:15:46 +0000 UTC" firstStartedPulling="2025-11-26 17:15:47.284358768 +0000 UTC m=+6568.075075916" lastFinishedPulling="2025-11-26 17:15:56.957118726 +0000 UTC m=+6577.747835874" observedRunningTime="2025-11-26 17:15:57.882457243 +0000 UTC m=+6578.673174411" watchObservedRunningTime="2025-11-26 17:15:57.923304439 +0000 UTC m=+6578.714021587" Nov 26 17:15:58 crc kubenswrapper[5010]: I1126 17:15:58.005319 5010 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn" podStartSLOduration=2.487258147 podStartE2EDuration="12.005300078s" podCreationTimestamp="2025-11-26 17:15:46 +0000 UTC" firstStartedPulling="2025-11-26 17:15:47.432331937 +0000 UTC m=+6568.223049085" lastFinishedPulling="2025-11-26 17:15:56.950373868 +0000 UTC m=+6577.741091016" observedRunningTime="2025-11-26 17:15:57.980266465 +0000 UTC m=+6578.770983613" watchObservedRunningTime="2025-11-26 17:15:58.005300078 +0000 UTC m=+6578.796017226" Nov 26 17:15:58 crc kubenswrapper[5010]: I1126 17:15:58.818891 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cdxj2" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="registry-server" probeResult="failure" output=< Nov 26 17:15:58 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:15:58 crc kubenswrapper[5010]: > Nov 26 17:16:06 crc kubenswrapper[5010]: I1126 17:16:06.395892 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8qlmg" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" probeResult="failure" output=< Nov 26 17:16:06 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:16:06 crc kubenswrapper[5010]: > Nov 26 17:16:07 crc kubenswrapper[5010]: I1126 17:16:07.193027 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-2dx82" Nov 26 17:16:07 crc kubenswrapper[5010]: I1126 17:16:07.218733 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-2dx82" podStartSLOduration=12.0448647 podStartE2EDuration="21.218695853s" podCreationTimestamp="2025-11-26 17:15:46 +0000 UTC" firstStartedPulling="2025-11-26 17:15:47.864554744 +0000 UTC m=+6568.655271902" lastFinishedPulling="2025-11-26 17:15:57.038385907 +0000 UTC m=+6577.829103055" observedRunningTime="2025-11-26 17:15:58.013309967 +0000 UTC m=+6578.804027115" watchObservedRunningTime="2025-11-26 17:16:07.218695853 +0000 UTC m=+6588.009413001" Nov 26 17:16:07 crc kubenswrapper[5010]: I1126 17:16:07.828287 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:16:07 crc kubenswrapper[5010]: I1126 17:16:07.887123 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:16:09 crc kubenswrapper[5010]: I1126 17:16:09.651396 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cdxj2"] Nov 26 17:16:09 crc kubenswrapper[5010]: I1126 17:16:09.652020 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cdxj2" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="registry-server" containerID="cri-o://c235e1dc0a1dd8a34052491d6cd4cfca3c2f52182fbe921dc1d87a2e2fbef483" gracePeriod=2 Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.007196 5010 generic.go:334] "Generic (PLEG): container finished" podID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerID="c235e1dc0a1dd8a34052491d6cd4cfca3c2f52182fbe921dc1d87a2e2fbef483" exitCode=0 Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.007271 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerDied","Data":"c235e1dc0a1dd8a34052491d6cd4cfca3c2f52182fbe921dc1d87a2e2fbef483"} Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.271224 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.367215 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-utilities\") pod \"991b800c-a342-4ff2-b6ca-e66eac243ae5\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.367377 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-catalog-content\") pod \"991b800c-a342-4ff2-b6ca-e66eac243ae5\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.367491 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t847q\" (UniqueName: \"kubernetes.io/projected/991b800c-a342-4ff2-b6ca-e66eac243ae5-kube-api-access-t847q\") pod \"991b800c-a342-4ff2-b6ca-e66eac243ae5\" (UID: \"991b800c-a342-4ff2-b6ca-e66eac243ae5\") " Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.369384 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-utilities" (OuterVolumeSpecName: "utilities") pod "991b800c-a342-4ff2-b6ca-e66eac243ae5" (UID: "991b800c-a342-4ff2-b6ca-e66eac243ae5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.374935 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/991b800c-a342-4ff2-b6ca-e66eac243ae5-kube-api-access-t847q" (OuterVolumeSpecName: "kube-api-access-t847q") pod "991b800c-a342-4ff2-b6ca-e66eac243ae5" (UID: "991b800c-a342-4ff2-b6ca-e66eac243ae5"). InnerVolumeSpecName "kube-api-access-t847q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.426463 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "991b800c-a342-4ff2-b6ca-e66eac243ae5" (UID: "991b800c-a342-4ff2-b6ca-e66eac243ae5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.470144 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t847q\" (UniqueName: \"kubernetes.io/projected/991b800c-a342-4ff2-b6ca-e66eac243ae5-kube-api-access-t847q\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.470179 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.470188 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b800c-a342-4ff2-b6ca-e66eac243ae5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.563649 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.564115 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="34a5290e-e2d7-407e-9c56-adedc14140a4" containerName="openstackclient" containerID="cri-o://4d373c9426554371258c7ba49ae4a0ebc68f14ff98789c572ff83d9adb33d716" gracePeriod=2 Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.578126 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655027 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:10 crc kubenswrapper[5010]: E1126 17:16:10.655438 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="registry-server" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655450 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="registry-server" Nov 26 17:16:10 crc kubenswrapper[5010]: E1126 17:16:10.655482 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="34a5290e-e2d7-407e-9c56-adedc14140a4" containerName="openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655488 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="34a5290e-e2d7-407e-9c56-adedc14140a4" containerName="openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: E1126 17:16:10.655518 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="extract-utilities" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655525 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="extract-utilities" Nov 26 17:16:10 crc kubenswrapper[5010]: E1126 17:16:10.655540 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="extract-content" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655545 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="extract-content" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655742 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="34a5290e-e2d7-407e-9c56-adedc14140a4" containerName="openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.655755 5010 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" containerName="registry-server" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.656419 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.674649 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.674786 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7th79\" (UniqueName: \"kubernetes.io/projected/b17ee974-7531-47d7-960b-e6ff6dec4e05-kube-api-access-7th79\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.674819 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config-secret\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.674849 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.680422 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.705116 5010 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="34a5290e-e2d7-407e-9c56-adedc14140a4" podUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.779883 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.780042 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.780106 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7th79\" (UniqueName: \"kubernetes.io/projected/b17ee974-7531-47d7-960b-e6ff6dec4e05-kube-api-access-7th79\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.780126 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config-secret\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.784856 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.807179 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config-secret\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.818384 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-combined-ca-bundle\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.845320 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7th79\" (UniqueName: \"kubernetes.io/projected/b17ee974-7531-47d7-960b-e6ff6dec4e05-kube-api-access-7th79\") pod \"openstackclient\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " pod="openstack/openstackclient" Nov 26 17:16:10 crc kubenswrapper[5010]: I1126 17:16:10.985830 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.029822 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.031124 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.041233 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-vfgbq" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.095029 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cdxj2" event={"ID":"991b800c-a342-4ff2-b6ca-e66eac243ae5","Type":"ContainerDied","Data":"b2294a251a32e6096ea73b7dcd02c26a5de81f0909898e199457d9bef0642e33"} Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.095100 5010 scope.go:117] "RemoveContainer" containerID="c235e1dc0a1dd8a34052491d6cd4cfca3c2f52182fbe921dc1d87a2e2fbef483" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.095287 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cdxj2" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.134107 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.191387 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rt4lp\" (UniqueName: \"kubernetes.io/projected/614425eb-8ee8-405f-a428-d98ded958f1a-kube-api-access-rt4lp\") pod \"kube-state-metrics-0\" (UID: \"614425eb-8ee8-405f-a428-d98ded958f1a\") " pod="openstack/kube-state-metrics-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.229089 5010 scope.go:117] "RemoveContainer" containerID="bec7e5fa6d421f4a353b8d3351c007760199b70a8a6275029c829cec64c9ec15" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.240958 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cdxj2"] Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.328975 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rt4lp\" (UniqueName: \"kubernetes.io/projected/614425eb-8ee8-405f-a428-d98ded958f1a-kube-api-access-rt4lp\") pod \"kube-state-metrics-0\" (UID: \"614425eb-8ee8-405f-a428-d98ded958f1a\") " pod="openstack/kube-state-metrics-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.398195 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rt4lp\" (UniqueName: \"kubernetes.io/projected/614425eb-8ee8-405f-a428-d98ded958f1a-kube-api-access-rt4lp\") pod \"kube-state-metrics-0\" (UID: \"614425eb-8ee8-405f-a428-d98ded958f1a\") " pod="openstack/kube-state-metrics-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.398369 5010 scope.go:117] "RemoveContainer" containerID="e3af9289ac145a4bcefbd635dc9b8573d3263d06e2c44fa38376b1c3970e74f0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.419404 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cdxj2"] Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.491112 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.677203 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.683469 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.688215 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.688473 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.688627 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.688781 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.689005 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-sdwdn" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.718521 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756101 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756158 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/81478787-2999-4b71-94f6-b4e1c2618f2a-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756263 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/81478787-2999-4b71-94f6-b4e1c2618f2a-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756292 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlwql\" (UniqueName: \"kubernetes.io/projected/81478787-2999-4b71-94f6-b4e1c2618f2a-kube-api-access-jlwql\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756317 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756349 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-web-config\") pod \"alertmanager-metric-storage-0\" (UID: 
\"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.756446 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/81478787-2999-4b71-94f6-b4e1c2618f2a-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859065 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859109 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/81478787-2999-4b71-94f6-b4e1c2618f2a-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859184 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/81478787-2999-4b71-94f6-b4e1c2618f2a-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859220 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlwql\" (UniqueName: \"kubernetes.io/projected/81478787-2999-4b71-94f6-b4e1c2618f2a-kube-api-access-jlwql\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859243 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859274 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.859358 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/81478787-2999-4b71-94f6-b4e1c2618f2a-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.860005 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: 
\"kubernetes.io/empty-dir/81478787-2999-4b71-94f6-b4e1c2618f2a-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.865420 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/81478787-2999-4b71-94f6-b4e1c2618f2a-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.865496 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.865851 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/81478787-2999-4b71-94f6-b4e1c2618f2a-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.883234 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.885365 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/81478787-2999-4b71-94f6-b4e1c2618f2a-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.908025 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:16:11 crc kubenswrapper[5010]: E1126 17:16:11.908517 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.915312 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlwql\" (UniqueName: \"kubernetes.io/projected/81478787-2999-4b71-94f6-b4e1c2618f2a-kube-api-access-jlwql\") pod \"alertmanager-metric-storage-0\" (UID: \"81478787-2999-4b71-94f6-b4e1c2618f2a\") " pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.931143 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="991b800c-a342-4ff2-b6ca-e66eac243ae5" path="/var/lib/kubelet/pods/991b800c-a342-4ff2-b6ca-e66eac243ae5/volumes" Nov 26 17:16:11 crc kubenswrapper[5010]: I1126 17:16:11.935254 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/openstackclient"] Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.014023 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.156036 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b17ee974-7531-47d7-960b-e6ff6dec4e05","Type":"ContainerStarted","Data":"0729bba116dc23ce6baa080bfa31f38ba6cd298d2f69d35f54f870fbd13d9bbf"} Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.317011 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.320763 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.325089 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.325154 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.325344 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.325424 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.325497 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.325738 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-2kzxp" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.336659 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.385509 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3e7cbaba-7948-4346-a003-297a5cf57d45-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.387478 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3e7cbaba-7948-4346-a003-297a5cf57d45-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.387701 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.387856 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7fsv\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-kube-api-access-t7fsv\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.387886 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.387955 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.397128 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-config\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.397225 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.419274 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499338 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499398 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3e7cbaba-7948-4346-a003-297a5cf57d45-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499444 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3e7cbaba-7948-4346-a003-297a5cf57d45-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499539 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499613 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7fsv\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-kube-api-access-t7fsv\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499648 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499676 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.499777 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-config\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.507251 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3e7cbaba-7948-4346-a003-297a5cf57d45-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.507415 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3e7cbaba-7948-4346-a003-297a5cf57d45-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.507819 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.507997 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-config\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.508018 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: 
\"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.523500 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.523565 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d10ddf75f13d4a3911b74a68fa4825e3b3114b10125bd4519bf25b697d859ae9/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.538459 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7fsv\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-kube-api-access-t7fsv\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.539021 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.599259 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.657284 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:12 crc kubenswrapper[5010]: I1126 17:16:12.707932 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.172190 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"b17ee974-7531-47d7-960b-e6ff6dec4e05","Type":"ContainerStarted","Data":"8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d"} Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.177905 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"81478787-2999-4b71-94f6-b4e1c2618f2a","Type":"ContainerStarted","Data":"9f033767836ec9e7432dfa993174f85826dae3ab059f1ec898f7900a837caf53"} Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.192904 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"614425eb-8ee8-405f-a428-d98ded958f1a","Type":"ContainerStarted","Data":"af4c0c9f96e9644d6f21b757e404813ec58d9ef922363a2051c0fce48caa2e42"} Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.197081 5010 generic.go:334] "Generic (PLEG): container finished" podID="34a5290e-e2d7-407e-9c56-adedc14140a4" containerID="4d373c9426554371258c7ba49ae4a0ebc68f14ff98789c572ff83d9adb33d716" exitCode=137 Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.197135 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2233b229e65fd72397bbd7e1fc4b0126ab6e6eca6bf22ea5a6cf8cbe462d2df7" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.207323 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.207304196 podStartE2EDuration="3.207304196s" podCreationTimestamp="2025-11-26 17:16:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:16:13.200473536 +0000 UTC m=+6593.991190684" watchObservedRunningTime="2025-11-26 17:16:13.207304196 +0000 UTC m=+6593.998021344" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.273832 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.307520 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.431039 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc5k4\" (UniqueName: \"kubernetes.io/projected/34a5290e-e2d7-407e-9c56-adedc14140a4-kube-api-access-bc5k4\") pod \"34a5290e-e2d7-407e-9c56-adedc14140a4\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.431153 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config\") pod \"34a5290e-e2d7-407e-9c56-adedc14140a4\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.431207 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config-secret\") pod \"34a5290e-e2d7-407e-9c56-adedc14140a4\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.431244 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-combined-ca-bundle\") pod \"34a5290e-e2d7-407e-9c56-adedc14140a4\" (UID: \"34a5290e-e2d7-407e-9c56-adedc14140a4\") " Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.439963 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/34a5290e-e2d7-407e-9c56-adedc14140a4-kube-api-access-bc5k4" (OuterVolumeSpecName: "kube-api-access-bc5k4") pod "34a5290e-e2d7-407e-9c56-adedc14140a4" (UID: "34a5290e-e2d7-407e-9c56-adedc14140a4"). InnerVolumeSpecName "kube-api-access-bc5k4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.473357 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "34a5290e-e2d7-407e-9c56-adedc14140a4" (UID: "34a5290e-e2d7-407e-9c56-adedc14140a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.474763 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "34a5290e-e2d7-407e-9c56-adedc14140a4" (UID: "34a5290e-e2d7-407e-9c56-adedc14140a4"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.507871 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "34a5290e-e2d7-407e-9c56-adedc14140a4" (UID: "34a5290e-e2d7-407e-9c56-adedc14140a4"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.534610 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc5k4\" (UniqueName: \"kubernetes.io/projected/34a5290e-e2d7-407e-9c56-adedc14140a4-kube-api-access-bc5k4\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.534652 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.534661 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.534670 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/34a5290e-e2d7-407e-9c56-adedc14140a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:13 crc kubenswrapper[5010]: I1126 17:16:13.912975 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="34a5290e-e2d7-407e-9c56-adedc14140a4" path="/var/lib/kubelet/pods/34a5290e-e2d7-407e-9c56-adedc14140a4/volumes" Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.068662 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-7krs5"] Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.080851 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-3cbe-account-create-update-2z9p8"] Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.096315 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-3cbe-account-create-update-2z9p8"] Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.112030 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-7krs5"] Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.206630 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"614425eb-8ee8-405f-a428-d98ded958f1a","Type":"ContainerStarted","Data":"979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79"} Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.206766 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.209043 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.209335 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerStarted","Data":"9fd58a83fd0b68cc1ad55e2842f9912e8bb0911fa0478cb9c11018d0dc17eb04"} Nov 26 17:16:14 crc kubenswrapper[5010]: I1126 17:16:14.229129 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.696321255 podStartE2EDuration="4.229110563s" podCreationTimestamp="2025-11-26 17:16:10 +0000 UTC" firstStartedPulling="2025-11-26 17:16:12.442772686 +0000 UTC m=+6593.233489834" lastFinishedPulling="2025-11-26 17:16:12.975561994 +0000 UTC m=+6593.766279142" observedRunningTime="2025-11-26 17:16:14.219425452 +0000 UTC m=+6595.010142600" watchObservedRunningTime="2025-11-26 17:16:14.229110563 +0000 UTC m=+6595.019827711" Nov 26 17:16:15 crc kubenswrapper[5010]: I1126 17:16:15.903545 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1919efcb-b975-4346-83d6-5a62ecb38f8e" path="/var/lib/kubelet/pods/1919efcb-b975-4346-83d6-5a62ecb38f8e/volumes" Nov 26 17:16:15 crc kubenswrapper[5010]: I1126 17:16:15.904841 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea" path="/var/lib/kubelet/pods/2dd9f4f8-42b0-49e7-a195-ccb1f26ca7ea/volumes" Nov 26 17:16:16 crc kubenswrapper[5010]: I1126 17:16:16.462087 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8qlmg" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" probeResult="failure" output=< Nov 26 17:16:16 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:16:16 crc kubenswrapper[5010]: > Nov 26 17:16:19 crc kubenswrapper[5010]: I1126 17:16:19.263286 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerStarted","Data":"c7de79f478fe52c3e3858c73017a3e4013111589937be1977b86aa26604d86cd"} Nov 26 17:16:19 crc kubenswrapper[5010]: I1126 17:16:19.264654 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"81478787-2999-4b71-94f6-b4e1c2618f2a","Type":"ContainerStarted","Data":"2c8c069ff9b32d18e743611a1a7b1c1aa5a9e5230cf0f2584303a8fc7954fa47"} Nov 26 17:16:21 crc kubenswrapper[5010]: I1126 17:16:21.035120 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-mgrwf"] Nov 26 17:16:21 crc kubenswrapper[5010]: I1126 17:16:21.047995 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-mgrwf"] Nov 26 17:16:21 crc kubenswrapper[5010]: I1126 17:16:21.496749 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 17:16:21 crc kubenswrapper[5010]: I1126 17:16:21.903774 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01e05e22-ea98-435b-a24a-1d46bd501bff" path="/var/lib/kubelet/pods/01e05e22-ea98-435b-a24a-1d46bd501bff/volumes" Nov 26 17:16:25 crc kubenswrapper[5010]: I1126 17:16:25.418987 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:16:25 crc kubenswrapper[5010]: I1126 17:16:25.469139 5010 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:16:25 crc kubenswrapper[5010]: I1126 17:16:25.661724 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8qlmg"] Nov 26 17:16:26 crc kubenswrapper[5010]: I1126 17:16:26.892106 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:16:26 crc kubenswrapper[5010]: E1126 17:16:26.892678 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:16:27 crc kubenswrapper[5010]: I1126 17:16:27.371336 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerID="c7de79f478fe52c3e3858c73017a3e4013111589937be1977b86aa26604d86cd" exitCode=0 Nov 26 17:16:27 crc kubenswrapper[5010]: I1126 17:16:27.371467 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerDied","Data":"c7de79f478fe52c3e3858c73017a3e4013111589937be1977b86aa26604d86cd"} Nov 26 17:16:27 crc kubenswrapper[5010]: I1126 17:16:27.374913 5010 generic.go:334] "Generic (PLEG): container finished" podID="81478787-2999-4b71-94f6-b4e1c2618f2a" containerID="2c8c069ff9b32d18e743611a1a7b1c1aa5a9e5230cf0f2584303a8fc7954fa47" exitCode=0 Nov 26 17:16:27 crc kubenswrapper[5010]: I1126 17:16:27.374973 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"81478787-2999-4b71-94f6-b4e1c2618f2a","Type":"ContainerDied","Data":"2c8c069ff9b32d18e743611a1a7b1c1aa5a9e5230cf0f2584303a8fc7954fa47"} Nov 26 17:16:27 crc kubenswrapper[5010]: I1126 17:16:27.375175 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8qlmg" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" containerID="cri-o://8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b" gracePeriod=2 Nov 26 17:16:27 crc kubenswrapper[5010]: I1126 17:16:27.985877 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.090478 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-catalog-content\") pod \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.090601 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-utilities\") pod \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.090771 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m89x9\" (UniqueName: \"kubernetes.io/projected/9f46a5c3-1928-4904-8122-6a698f5b7b4a-kube-api-access-m89x9\") pod \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\" (UID: \"9f46a5c3-1928-4904-8122-6a698f5b7b4a\") " Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.092098 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-utilities" (OuterVolumeSpecName: "utilities") pod "9f46a5c3-1928-4904-8122-6a698f5b7b4a" (UID: "9f46a5c3-1928-4904-8122-6a698f5b7b4a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.102068 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f46a5c3-1928-4904-8122-6a698f5b7b4a-kube-api-access-m89x9" (OuterVolumeSpecName: "kube-api-access-m89x9") pod "9f46a5c3-1928-4904-8122-6a698f5b7b4a" (UID: "9f46a5c3-1928-4904-8122-6a698f5b7b4a"). InnerVolumeSpecName "kube-api-access-m89x9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.200412 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.200451 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m89x9\" (UniqueName: \"kubernetes.io/projected/9f46a5c3-1928-4904-8122-6a698f5b7b4a-kube-api-access-m89x9\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.203558 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9f46a5c3-1928-4904-8122-6a698f5b7b4a" (UID: "9f46a5c3-1928-4904-8122-6a698f5b7b4a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.301398 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9f46a5c3-1928-4904-8122-6a698f5b7b4a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.389116 5010 generic.go:334] "Generic (PLEG): container finished" podID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerID="8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b" exitCode=0 Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.389165 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerDied","Data":"8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b"} Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.389193 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8qlmg" event={"ID":"9f46a5c3-1928-4904-8122-6a698f5b7b4a","Type":"ContainerDied","Data":"bb840a69f256e071ee1a93826488f964598b43c593e0e285a26077a99c570a8a"} Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.389197 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8qlmg" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.389220 5010 scope.go:117] "RemoveContainer" containerID="8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.427305 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8qlmg"] Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.427690 5010 scope.go:117] "RemoveContainer" containerID="b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.436980 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8qlmg"] Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.447625 5010 scope.go:117] "RemoveContainer" containerID="78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.497611 5010 scope.go:117] "RemoveContainer" containerID="8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b" Nov 26 17:16:28 crc kubenswrapper[5010]: E1126 17:16:28.498049 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b\": container with ID starting with 8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b not found: ID does not exist" containerID="8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.498079 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b"} err="failed to get container status \"8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b\": rpc error: code = NotFound desc = could not find container \"8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b\": container with ID starting with 8a2203a6072129df67c412c9cc6ac271edc97185b8b16c8373681dc36670e60b not found: ID does not exist" Nov 26 17:16:28 crc 
kubenswrapper[5010]: I1126 17:16:28.498101 5010 scope.go:117] "RemoveContainer" containerID="b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e" Nov 26 17:16:28 crc kubenswrapper[5010]: E1126 17:16:28.498338 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e\": container with ID starting with b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e not found: ID does not exist" containerID="b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.498362 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e"} err="failed to get container status \"b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e\": rpc error: code = NotFound desc = could not find container \"b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e\": container with ID starting with b368effdb97d20d86c3bb80896c69085fbc000dc6e2ce6ae9dc180bac1b8ea1e not found: ID does not exist" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.498376 5010 scope.go:117] "RemoveContainer" containerID="78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950" Nov 26 17:16:28 crc kubenswrapper[5010]: E1126 17:16:28.498597 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950\": container with ID starting with 78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950 not found: ID does not exist" containerID="78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950" Nov 26 17:16:28 crc kubenswrapper[5010]: I1126 17:16:28.498619 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950"} err="failed to get container status \"78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950\": rpc error: code = NotFound desc = could not find container \"78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950\": container with ID starting with 78c5d0955ecc89490733413913dac062124c87dc8de76027920a008c16e69950 not found: ID does not exist" Nov 26 17:16:29 crc kubenswrapper[5010]: I1126 17:16:29.908016 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" path="/var/lib/kubelet/pods/9f46a5c3-1928-4904-8122-6a698f5b7b4a/volumes" Nov 26 17:16:31 crc kubenswrapper[5010]: I1126 17:16:31.430585 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"81478787-2999-4b71-94f6-b4e1c2618f2a","Type":"ContainerStarted","Data":"69f53898f7a97b8e8eb6f9579ede5d28db4cda5b8556eb13d5e13743f7e51a42"} Nov 26 17:16:34 crc kubenswrapper[5010]: I1126 17:16:34.467534 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"81478787-2999-4b71-94f6-b4e1c2618f2a","Type":"ContainerStarted","Data":"be95fefbb88978b4bab8c047eabe7880f2463795e1945820861c885fddcc207a"} Nov 26 17:16:34 crc kubenswrapper[5010]: I1126 17:16:34.468067 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:34 crc kubenswrapper[5010]: 
I1126 17:16:34.486465 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Nov 26 17:16:34 crc kubenswrapper[5010]: I1126 17:16:34.501164 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=6.009886295 podStartE2EDuration="23.501144193s" podCreationTimestamp="2025-11-26 17:16:11 +0000 UTC" firstStartedPulling="2025-11-26 17:16:12.737875384 +0000 UTC m=+6593.528592522" lastFinishedPulling="2025-11-26 17:16:30.229133272 +0000 UTC m=+6611.019850420" observedRunningTime="2025-11-26 17:16:34.494495528 +0000 UTC m=+6615.285212676" watchObservedRunningTime="2025-11-26 17:16:34.501144193 +0000 UTC m=+6615.291861341" Nov 26 17:16:35 crc kubenswrapper[5010]: I1126 17:16:35.482204 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerStarted","Data":"98a85a2714770cf609d6dbf54beaede3294c62319af5a0ba84857cd938386816"} Nov 26 17:16:37 crc kubenswrapper[5010]: I1126 17:16:37.892608 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:16:37 crc kubenswrapper[5010]: E1126 17:16:37.893554 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:16:39 crc kubenswrapper[5010]: I1126 17:16:39.534972 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerStarted","Data":"e190899c1c7911bd9466fddb529bf3f80ed6155f8102661af5900d1d6bbdb937"} Nov 26 17:16:42 crc kubenswrapper[5010]: I1126 17:16:42.577895 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerStarted","Data":"4665736d5b38764a49ec8397581e0516c68e746442ab07e2c70b3c9df836039a"} Nov 26 17:16:42 crc kubenswrapper[5010]: I1126 17:16:42.657802 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:42 crc kubenswrapper[5010]: I1126 17:16:42.657868 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:42 crc kubenswrapper[5010]: I1126 17:16:42.659958 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:42 crc kubenswrapper[5010]: I1126 17:16:42.691230 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.281175088 podStartE2EDuration="31.691210945s" podCreationTimestamp="2025-11-26 17:16:11 +0000 UTC" firstStartedPulling="2025-11-26 17:16:13.269467642 +0000 UTC m=+6594.060184780" lastFinishedPulling="2025-11-26 17:16:41.679503489 +0000 UTC m=+6622.470220637" observedRunningTime="2025-11-26 17:16:42.610102378 +0000 UTC m=+6623.400819536" watchObservedRunningTime="2025-11-26 17:16:42.691210945 +0000 UTC m=+6623.481928093" Nov 
26 17:16:43 crc kubenswrapper[5010]: I1126 17:16:43.592909 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.934625 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.935277 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" containerName="openstackclient" containerID="cri-o://8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d" gracePeriod=2 Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.949594 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.968546 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:44 crc kubenswrapper[5010]: E1126 17:16:44.969369 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" containerName="openstackclient" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.969389 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" containerName="openstackclient" Nov 26 17:16:44 crc kubenswrapper[5010]: E1126 17:16:44.969415 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="extract-content" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.969424 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="extract-content" Nov 26 17:16:44 crc kubenswrapper[5010]: E1126 17:16:44.969434 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.969441 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" Nov 26 17:16:44 crc kubenswrapper[5010]: E1126 17:16:44.969460 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="extract-utilities" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.969466 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="extract-utilities" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.969644 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" containerName="openstackclient" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.969674 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f46a5c3-1928-4904-8122-6a698f5b7b4a" containerName="registry-server" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.970543 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.974526 5010 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" podUID="28b038c4-c8f4-4e86-835b-7225647d8e9a" Nov 26 17:16:44 crc kubenswrapper[5010]: I1126 17:16:44.985263 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.142392 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28b038c4-c8f4-4e86-835b-7225647d8e9a-openstack-config\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.142612 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b038c4-c8f4-4e86-835b-7225647d8e9a-combined-ca-bundle\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.142819 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28b038c4-c8f4-4e86-835b-7225647d8e9a-openstack-config-secret\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.143130 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qjz7f\" (UniqueName: \"kubernetes.io/projected/28b038c4-c8f4-4e86-835b-7225647d8e9a-kube-api-access-qjz7f\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.245295 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qjz7f\" (UniqueName: \"kubernetes.io/projected/28b038c4-c8f4-4e86-835b-7225647d8e9a-kube-api-access-qjz7f\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.245406 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28b038c4-c8f4-4e86-835b-7225647d8e9a-openstack-config\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.245477 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b038c4-c8f4-4e86-835b-7225647d8e9a-combined-ca-bundle\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.245532 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28b038c4-c8f4-4e86-835b-7225647d8e9a-openstack-config-secret\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 
17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.246421 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/28b038c4-c8f4-4e86-835b-7225647d8e9a-openstack-config\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.251077 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28b038c4-c8f4-4e86-835b-7225647d8e9a-combined-ca-bundle\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.259323 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/28b038c4-c8f4-4e86-835b-7225647d8e9a-openstack-config-secret\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.259831 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qjz7f\" (UniqueName: \"kubernetes.io/projected/28b038c4-c8f4-4e86-835b-7225647d8e9a-kube-api-access-qjz7f\") pod \"openstackclient\" (UID: \"28b038c4-c8f4-4e86-835b-7225647d8e9a\") " pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.293637 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:45 crc kubenswrapper[5010]: I1126 17:16:45.851464 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.041061 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.041622 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="prometheus" containerID="cri-o://98a85a2714770cf609d6dbf54beaede3294c62319af5a0ba84857cd938386816" gracePeriod=600 Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.041686 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="thanos-sidecar" containerID="cri-o://4665736d5b38764a49ec8397581e0516c68e746442ab07e2c70b3c9df836039a" gracePeriod=600 Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.041778 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="config-reloader" containerID="cri-o://e190899c1c7911bd9466fddb529bf3f80ed6155f8102661af5900d1d6bbdb937" gracePeriod=600 Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.626739 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerID="4665736d5b38764a49ec8397581e0516c68e746442ab07e2c70b3c9df836039a" exitCode=0 Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.627160 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerID="e190899c1c7911bd9466fddb529bf3f80ed6155f8102661af5900d1d6bbdb937" exitCode=0 Nov 26 17:16:46 crc 
kubenswrapper[5010]: I1126 17:16:46.627178 5010 generic.go:334] "Generic (PLEG): container finished" podID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerID="98a85a2714770cf609d6dbf54beaede3294c62319af5a0ba84857cd938386816" exitCode=0 Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.626750 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerDied","Data":"4665736d5b38764a49ec8397581e0516c68e746442ab07e2c70b3c9df836039a"} Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.627270 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerDied","Data":"e190899c1c7911bd9466fddb529bf3f80ed6155f8102661af5900d1d6bbdb937"} Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.627293 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerDied","Data":"98a85a2714770cf609d6dbf54beaede3294c62319af5a0ba84857cd938386816"} Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.629874 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"28b038c4-c8f4-4e86-835b-7225647d8e9a","Type":"ContainerStarted","Data":"57e052b28a2ba0a0040e5258829b324d10df8c6824d4e0bf590ab1c3252ce2f3"} Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.629902 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"28b038c4-c8f4-4e86-835b-7225647d8e9a","Type":"ContainerStarted","Data":"3505928f5598fb90d9493bcbde56cec3789fc2ae6546b10c96f5ee79620f628c"} Nov 26 17:16:46 crc kubenswrapper[5010]: I1126 17:16:46.647046 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.647024924 podStartE2EDuration="2.647024924s" podCreationTimestamp="2025-11-26 17:16:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:16:46.643285581 +0000 UTC m=+6627.434002729" watchObservedRunningTime="2025-11-26 17:16:46.647024924 +0000 UTC m=+6627.437742082" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.114028 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.211074 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.285671 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.285773 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-web-config\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286410 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7fsv\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-kube-api-access-t7fsv\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286455 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3e7cbaba-7948-4346-a003-297a5cf57d45-prometheus-metric-storage-rulefiles-0\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286514 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7th79\" (UniqueName: \"kubernetes.io/projected/b17ee974-7531-47d7-960b-e6ff6dec4e05-kube-api-access-7th79\") pod \"b17ee974-7531-47d7-960b-e6ff6dec4e05\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286609 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3e7cbaba-7948-4346-a003-297a5cf57d45-config-out\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286647 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config-secret\") pod \"b17ee974-7531-47d7-960b-e6ff6dec4e05\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286684 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-combined-ca-bundle\") pod \"b17ee974-7531-47d7-960b-e6ff6dec4e05\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286894 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-tls-assets\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286949 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: 
\"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-thanos-prometheus-http-client-file\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.286986 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config\") pod \"b17ee974-7531-47d7-960b-e6ff6dec4e05\" (UID: \"b17ee974-7531-47d7-960b-e6ff6dec4e05\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.287038 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-config\") pod \"3e7cbaba-7948-4346-a003-297a5cf57d45\" (UID: \"3e7cbaba-7948-4346-a003-297a5cf57d45\") " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.288967 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e7cbaba-7948-4346-a003-297a5cf57d45-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.303189 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-kube-api-access-t7fsv" (OuterVolumeSpecName: "kube-api-access-t7fsv") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "kube-api-access-t7fsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.303170 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b17ee974-7531-47d7-960b-e6ff6dec4e05-kube-api-access-7th79" (OuterVolumeSpecName: "kube-api-access-7th79") pod "b17ee974-7531-47d7-960b-e6ff6dec4e05" (UID: "b17ee974-7531-47d7-960b-e6ff6dec4e05"). InnerVolumeSpecName "kube-api-access-7th79". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.303255 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.303248 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e7cbaba-7948-4346-a003-297a5cf57d45-config-out" (OuterVolumeSpecName: "config-out") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "config-out". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.304397 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.307693 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-config" (OuterVolumeSpecName: "config") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.330316 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.358699 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-web-config" (OuterVolumeSpecName: "web-config") pod "3e7cbaba-7948-4346-a003-297a5cf57d45" (UID: "3e7cbaba-7948-4346-a003-297a5cf57d45"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.361538 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "b17ee974-7531-47d7-960b-e6ff6dec4e05" (UID: "b17ee974-7531-47d7-960b-e6ff6dec4e05"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.364130 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b17ee974-7531-47d7-960b-e6ff6dec4e05" (UID: "b17ee974-7531-47d7-960b-e6ff6dec4e05"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.382151 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "b17ee974-7531-47d7-960b-e6ff6dec4e05" (UID: "b17ee974-7531-47d7-960b-e6ff6dec4e05"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390048 5010 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-tls-assets\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390079 5010 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390092 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390102 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390136 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") on node \"crc\" " Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390147 5010 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/3e7cbaba-7948-4346-a003-297a5cf57d45-web-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390157 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7fsv\" (UniqueName: \"kubernetes.io/projected/3e7cbaba-7948-4346-a003-297a5cf57d45-kube-api-access-t7fsv\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390167 5010 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/3e7cbaba-7948-4346-a003-297a5cf57d45-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390206 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7th79\" (UniqueName: \"kubernetes.io/projected/b17ee974-7531-47d7-960b-e6ff6dec4e05-kube-api-access-7th79\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390215 5010 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/3e7cbaba-7948-4346-a003-297a5cf57d45-config-out\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390224 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.390233 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b17ee974-7531-47d7-960b-e6ff6dec4e05-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.426254 5010 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice 
STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.426410 5010 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1") on node "crc" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.491992 5010 reconciler_common.go:293] "Volume detached for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") on node \"crc\" DevicePath \"\"" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.643574 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"3e7cbaba-7948-4346-a003-297a5cf57d45","Type":"ContainerDied","Data":"9fd58a83fd0b68cc1ad55e2842f9912e8bb0911fa0478cb9c11018d0dc17eb04"} Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.643624 5010 scope.go:117] "RemoveContainer" containerID="4665736d5b38764a49ec8397581e0516c68e746442ab07e2c70b3c9df836039a" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.643627 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.649169 5010 generic.go:334] "Generic (PLEG): container finished" podID="b17ee974-7531-47d7-960b-e6ff6dec4e05" containerID="8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d" exitCode=137 Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.649564 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.669665 5010 scope.go:117] "RemoveContainer" containerID="e190899c1c7911bd9466fddb529bf3f80ed6155f8102661af5900d1d6bbdb937" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.686231 5010 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" podUID="28b038c4-c8f4-4e86-835b-7225647d8e9a" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.689893 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.706127 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.726037 5010 scope.go:117] "RemoveContainer" containerID="98a85a2714770cf609d6dbf54beaede3294c62319af5a0ba84857cd938386816" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.727891 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:47 crc kubenswrapper[5010]: E1126 17:16:47.728564 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="prometheus" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.728581 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="prometheus" Nov 26 17:16:47 crc kubenswrapper[5010]: E1126 17:16:47.728612 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="config-reloader" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 
17:16:47.728618 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="config-reloader" Nov 26 17:16:47 crc kubenswrapper[5010]: E1126 17:16:47.728668 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="init-config-reloader" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.728675 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="init-config-reloader" Nov 26 17:16:47 crc kubenswrapper[5010]: E1126 17:16:47.728687 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="thanos-sidecar" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.728693 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="thanos-sidecar" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.728994 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="prometheus" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.729009 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="config-reloader" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.729047 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" containerName="thanos-sidecar" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.731798 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.738021 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.738022 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.738129 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-2kzxp" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.738428 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.738610 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.738761 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.751326 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.756834 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.758362 5010 scope.go:117] "RemoveContainer" containerID="c7de79f478fe52c3e3858c73017a3e4013111589937be1977b86aa26604d86cd" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.812243 5010 scope.go:117] "RemoveContainer" containerID="8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d" Nov 26 
17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.835754 5010 scope.go:117] "RemoveContainer" containerID="8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d" Nov 26 17:16:47 crc kubenswrapper[5010]: E1126 17:16:47.836824 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d\": container with ID starting with 8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d not found: ID does not exist" containerID="8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.836949 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d"} err="failed to get container status \"8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d\": rpc error: code = NotFound desc = could not find container \"8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d\": container with ID starting with 8fb33cc7c4277e74d81b44a86e4bf56a94212e9391d5a7271b9e0c0d4e90a20d not found: ID does not exist" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.898583 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.898677 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.898776 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.898932 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.898972 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c74ec58-98b4-4a24-995d-a4c6c15376a9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.899048 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c74ec58-98b4-4a24-995d-a4c6c15376a9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.899073 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.899091 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7wfz\" (UniqueName: \"kubernetes.io/projected/1c74ec58-98b4-4a24-995d-a4c6c15376a9-kube-api-access-d7wfz\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.899117 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c74ec58-98b4-4a24-995d-a4c6c15376a9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.899146 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.899209 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.904242 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e7cbaba-7948-4346-a003-297a5cf57d45" path="/var/lib/kubelet/pods/3e7cbaba-7948-4346-a003-297a5cf57d45/volumes" Nov 26 17:16:47 crc kubenswrapper[5010]: I1126 17:16:47.905015 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b17ee974-7531-47d7-960b-e6ff6dec4e05" path="/var/lib/kubelet/pods/b17ee974-7531-47d7-960b-e6ff6dec4e05/volumes" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001612 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001723 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: 
\"kubernetes.io/projected/1c74ec58-98b4-4a24-995d-a4c6c15376a9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001799 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c74ec58-98b4-4a24-995d-a4c6c15376a9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001825 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001853 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7wfz\" (UniqueName: \"kubernetes.io/projected/1c74ec58-98b4-4a24-995d-a4c6c15376a9-kube-api-access-d7wfz\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001888 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c74ec58-98b4-4a24-995d-a4c6c15376a9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.001978 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.002028 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.002083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.002288 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 
17:16:48.002343 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.004024 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/1c74ec58-98b4-4a24-995d-a4c6c15376a9-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.007993 5010 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.008052 5010 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/d10ddf75f13d4a3911b74a68fa4825e3b3114b10125bd4519bf25b697d859ae9/globalmount\"" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.008124 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/1c74ec58-98b4-4a24-995d-a4c6c15376a9-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.008002 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.008739 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/1c74ec58-98b4-4a24-995d-a4c6c15376a9-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.008839 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.009070 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-thanos-prometheus-http-client-file\") pod 
\"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.009917 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-config\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.010472 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.017429 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/1c74ec58-98b4-4a24-995d-a4c6c15376a9-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.023945 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7wfz\" (UniqueName: \"kubernetes.io/projected/1c74ec58-98b4-4a24-995d-a4c6c15376a9-kube-api-access-d7wfz\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.067122 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-50aa2741-8e9b-49d7-b2a4-4832dda32de1\") pod \"prometheus-metric-storage-0\" (UID: \"1c74ec58-98b4-4a24-995d-a4c6c15376a9\") " pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.358014 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Nov 26 17:16:48 crc kubenswrapper[5010]: I1126 17:16:48.896001 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Nov 26 17:16:48 crc kubenswrapper[5010]: W1126 17:16:48.900957 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c74ec58_98b4_4a24_995d_a4c6c15376a9.slice/crio-686564d847eba75e0e9a608201011f5c3c1eb878c209994934582e085a32fa3f WatchSource:0}: Error finding container 686564d847eba75e0e9a608201011f5c3c1eb878c209994934582e085a32fa3f: Status 404 returned error can't find the container with id 686564d847eba75e0e9a608201011f5c3c1eb878c209994934582e085a32fa3f Nov 26 17:16:49 crc kubenswrapper[5010]: I1126 17:16:49.680683 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c74ec58-98b4-4a24-995d-a4c6c15376a9","Type":"ContainerStarted","Data":"686564d847eba75e0e9a608201011f5c3c1eb878c209994934582e085a32fa3f"} Nov 26 17:16:51 crc kubenswrapper[5010]: I1126 17:16:51.892026 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:16:51 crc kubenswrapper[5010]: E1126 17:16:51.892847 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:16:52 crc kubenswrapper[5010]: I1126 17:16:52.059192 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-9fe4-account-create-update-t4fxr"] Nov 26 17:16:52 crc kubenswrapper[5010]: I1126 17:16:52.079404 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-7f87h"] Nov 26 17:16:52 crc kubenswrapper[5010]: I1126 17:16:52.094535 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-7f87h"] Nov 26 17:16:52 crc kubenswrapper[5010]: I1126 17:16:52.106207 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-9fe4-account-create-update-t4fxr"] Nov 26 17:16:52 crc kubenswrapper[5010]: I1126 17:16:52.710350 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c74ec58-98b4-4a24-995d-a4c6c15376a9","Type":"ContainerStarted","Data":"590590e01e099f07ef3924bbe5c2823d3ee08bbb543722f1a82371f937b0918c"} Nov 26 17:16:53 crc kubenswrapper[5010]: I1126 17:16:53.920443 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37efa8b7-9526-4456-b8fb-3f637f7b03ba" path="/var/lib/kubelet/pods/37efa8b7-9526-4456-b8fb-3f637f7b03ba/volumes" Nov 26 17:16:53 crc kubenswrapper[5010]: I1126 17:16:53.922176 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70f6ece9-ed80-47e5-9da8-7d958b8da066" path="/var/lib/kubelet/pods/70f6ece9-ed80-47e5-9da8-7d958b8da066/volumes" Nov 26 17:16:59 crc kubenswrapper[5010]: I1126 17:16:59.038312 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-j2gdw"] Nov 26 17:16:59 crc kubenswrapper[5010]: I1126 17:16:59.057858 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/placement-db-sync-j2gdw"] Nov 26 17:16:59 crc kubenswrapper[5010]: I1126 17:16:59.811872 5010 generic.go:334] "Generic (PLEG): container finished" podID="1c74ec58-98b4-4a24-995d-a4c6c15376a9" containerID="590590e01e099f07ef3924bbe5c2823d3ee08bbb543722f1a82371f937b0918c" exitCode=0 Nov 26 17:16:59 crc kubenswrapper[5010]: I1126 17:16:59.811927 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c74ec58-98b4-4a24-995d-a4c6c15376a9","Type":"ContainerDied","Data":"590590e01e099f07ef3924bbe5c2823d3ee08bbb543722f1a82371f937b0918c"} Nov 26 17:16:59 crc kubenswrapper[5010]: I1126 17:16:59.908889 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52088ec4-ddbc-4524-83e8-ca6d029082fc" path="/var/lib/kubelet/pods/52088ec4-ddbc-4524-83e8-ca6d029082fc/volumes" Nov 26 17:17:00 crc kubenswrapper[5010]: I1126 17:17:00.824587 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c74ec58-98b4-4a24-995d-a4c6c15376a9","Type":"ContainerStarted","Data":"c646247c32142ab973b5e79c7dc3d8a573e14ed0b6278d732759d0742e630d0a"} Nov 26 17:17:03 crc kubenswrapper[5010]: I1126 17:17:03.872892 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c74ec58-98b4-4a24-995d-a4c6c15376a9","Type":"ContainerStarted","Data":"95607dce30fd512700fa95e4d47e961fbff0e6fe6487d8d57e76817a09ed7456"} Nov 26 17:17:03 crc kubenswrapper[5010]: I1126 17:17:03.873517 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"1c74ec58-98b4-4a24-995d-a4c6c15376a9","Type":"ContainerStarted","Data":"a556d21b8496c9b0c8bb80b74295f522da96bc6173482100f1d519be8d75d81c"} Nov 26 17:17:03 crc kubenswrapper[5010]: I1126 17:17:03.891971 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:17:03 crc kubenswrapper[5010]: E1126 17:17:03.892283 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:17:03 crc kubenswrapper[5010]: I1126 17:17:03.908828 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=16.908811476 podStartE2EDuration="16.908811476s" podCreationTimestamp="2025-11-26 17:16:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:17:03.900056978 +0000 UTC m=+6644.690774126" watchObservedRunningTime="2025-11-26 17:17:03.908811476 +0000 UTC m=+6644.699528624" Nov 26 17:17:08 crc kubenswrapper[5010]: I1126 17:17:08.358997 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.189829 5010 scope.go:117] "RemoveContainer" containerID="4d373c9426554371258c7ba49ae4a0ebc68f14ff98789c572ff83d9adb33d716" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.228195 5010 scope.go:117] "RemoveContainer" 
containerID="671fe32738a86be9e90208842b66a407a2c2b627c2efcd25ede6f23bbeeb9a7c" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.278734 5010 scope.go:117] "RemoveContainer" containerID="0b7f78224bef14c31f743c77d8b62088f208baf2293fa76358e3da3d4bd5b63b" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.334654 5010 scope.go:117] "RemoveContainer" containerID="12153338c30a6ab6210e5532050d36947e6fe2427df55189f3a9fbd8d0f3bea5" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.370943 5010 scope.go:117] "RemoveContainer" containerID="fce637e741eb6549f09d92349a058b0ce245a0b06e4ba6e11805c6b8ed1b6a94" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.424552 5010 scope.go:117] "RemoveContainer" containerID="ec802261bbbd66221ad53688b9a2c9db41a84d77432395a0b6a926df6d41dbde" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.461353 5010 scope.go:117] "RemoveContainer" containerID="bed95050dadf9ae64c267169121063109fb44e09a4436bbab27599cab5316739" Nov 26 17:17:09 crc kubenswrapper[5010]: I1126 17:17:09.514057 5010 scope.go:117] "RemoveContainer" containerID="0e53fa8c2e11c7da51c9ca09f6f0926bfd55f433e8922611449b5f51abb466c9" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.002814 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.018075 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.022426 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.031148 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.045495 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069173 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069371 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-run-httpd\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069458 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zwv8\" (UniqueName: \"kubernetes.io/projected/74fe8866-bdcf-4890-96a4-6494b5c64866-kube-api-access-7zwv8\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069485 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-config-data\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069551 5010 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069783 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-log-httpd\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.069854 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-scripts\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171567 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-run-httpd\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171640 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zwv8\" (UniqueName: \"kubernetes.io/projected/74fe8866-bdcf-4890-96a4-6494b5c64866-kube-api-access-7zwv8\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171673 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-config-data\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171738 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171816 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-log-httpd\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171850 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-scripts\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.171931 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.172570 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-log-httpd\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.173002 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-run-httpd\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.176545 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.177223 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-scripts\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.177610 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-config-data\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.178335 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.202593 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zwv8\" (UniqueName: \"kubernetes.io/projected/74fe8866-bdcf-4890-96a4-6494b5c64866-kube-api-access-7zwv8\") pod \"ceilometer-0\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.349171 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:14 crc kubenswrapper[5010]: I1126 17:17:14.882763 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:14 crc kubenswrapper[5010]: W1126 17:17:14.903363 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74fe8866_bdcf_4890_96a4_6494b5c64866.slice/crio-fcff79e1a31bc1451343e775a33203cd70385458f079b84e35e8ba8633d7d3a3 WatchSource:0}: Error finding container fcff79e1a31bc1451343e775a33203cd70385458f079b84e35e8ba8633d7d3a3: Status 404 returned error can't find the container with id fcff79e1a31bc1451343e775a33203cd70385458f079b84e35e8ba8633d7d3a3 Nov 26 17:17:15 crc kubenswrapper[5010]: I1126 17:17:15.043831 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerStarted","Data":"fcff79e1a31bc1451343e775a33203cd70385458f079b84e35e8ba8633d7d3a3"} Nov 26 17:17:15 crc kubenswrapper[5010]: I1126 17:17:15.891352 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:17:17 crc kubenswrapper[5010]: I1126 17:17:17.073994 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerStarted","Data":"d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4"} Nov 26 17:17:17 crc kubenswrapper[5010]: I1126 17:17:17.076870 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"ef98edb74c5bb6bcfce742aabe11d10cee533dd74639da58dff43195ce6da7ae"} Nov 26 17:17:18 crc kubenswrapper[5010]: I1126 17:17:18.088986 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerStarted","Data":"d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc"} Nov 26 17:17:18 crc kubenswrapper[5010]: I1126 17:17:18.359139 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Nov 26 17:17:18 crc kubenswrapper[5010]: I1126 17:17:18.367551 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Nov 26 17:17:19 crc kubenswrapper[5010]: I1126 17:17:19.103809 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerStarted","Data":"16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f"} Nov 26 17:17:19 crc kubenswrapper[5010]: I1126 17:17:19.111343 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Nov 26 17:17:21 crc kubenswrapper[5010]: I1126 17:17:21.126471 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerStarted","Data":"1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d"} Nov 26 17:17:21 crc kubenswrapper[5010]: I1126 17:17:21.127402 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 17:17:21 crc kubenswrapper[5010]: I1126 17:17:21.170672 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.916242364 podStartE2EDuration="8.170638821s" podCreationTimestamp="2025-11-26 17:17:13 +0000 UTC" firstStartedPulling="2025-11-26 17:17:14.911490391 +0000 UTC m=+6655.702207539" lastFinishedPulling="2025-11-26 17:17:20.165886848 +0000 UTC m=+6660.956603996" observedRunningTime="2025-11-26 17:17:21.169193335 +0000 UTC m=+6661.959910483" watchObservedRunningTime="2025-11-26 17:17:21.170638821 +0000 UTC m=+6661.961356009" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.370190 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-kkbjt"] Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.371977 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.386075 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-kkbjt"] Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.536356 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nnj56\" (UniqueName: \"kubernetes.io/projected/6697bc34-58f2-4daf-b940-1d78a44566e4-kube-api-access-nnj56\") pod \"aodh-db-create-kkbjt\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.536761 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6697bc34-58f2-4daf-b940-1d78a44566e4-operator-scripts\") pod \"aodh-db-create-kkbjt\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.579467 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-e014-account-create-update-gcfkg"] Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.581420 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.589173 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.591316 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-e014-account-create-update-gcfkg"] Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.638480 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6697bc34-58f2-4daf-b940-1d78a44566e4-operator-scripts\") pod \"aodh-db-create-kkbjt\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.638607 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nnj56\" (UniqueName: \"kubernetes.io/projected/6697bc34-58f2-4daf-b940-1d78a44566e4-kube-api-access-nnj56\") pod \"aodh-db-create-kkbjt\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.639279 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6697bc34-58f2-4daf-b940-1d78a44566e4-operator-scripts\") pod \"aodh-db-create-kkbjt\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.659892 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nnj56\" (UniqueName: \"kubernetes.io/projected/6697bc34-58f2-4daf-b940-1d78a44566e4-kube-api-access-nnj56\") pod \"aodh-db-create-kkbjt\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.690832 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.741154 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83d1625c-1b09-40b6-8c61-dd86d17becf1-operator-scripts\") pod \"aodh-e014-account-create-update-gcfkg\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.741211 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ctn84\" (UniqueName: \"kubernetes.io/projected/83d1625c-1b09-40b6-8c61-dd86d17becf1-kube-api-access-ctn84\") pod \"aodh-e014-account-create-update-gcfkg\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.843450 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83d1625c-1b09-40b6-8c61-dd86d17becf1-operator-scripts\") pod \"aodh-e014-account-create-update-gcfkg\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.843856 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ctn84\" (UniqueName: \"kubernetes.io/projected/83d1625c-1b09-40b6-8c61-dd86d17becf1-kube-api-access-ctn84\") pod \"aodh-e014-account-create-update-gcfkg\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.845230 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83d1625c-1b09-40b6-8c61-dd86d17becf1-operator-scripts\") pod \"aodh-e014-account-create-update-gcfkg\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.888147 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ctn84\" (UniqueName: \"kubernetes.io/projected/83d1625c-1b09-40b6-8c61-dd86d17becf1-kube-api-access-ctn84\") pod \"aodh-e014-account-create-update-gcfkg\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:28 crc kubenswrapper[5010]: I1126 17:17:28.899488 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:29 crc kubenswrapper[5010]: I1126 17:17:29.282734 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-kkbjt"] Nov 26 17:17:29 crc kubenswrapper[5010]: W1126 17:17:29.312078 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6697bc34_58f2_4daf_b940_1d78a44566e4.slice/crio-ef7f0e6a5fbc224ae1bbb0d979ca38ebc40996e27fec623a9bb52866246115ea WatchSource:0}: Error finding container ef7f0e6a5fbc224ae1bbb0d979ca38ebc40996e27fec623a9bb52866246115ea: Status 404 returned error can't find the container with id ef7f0e6a5fbc224ae1bbb0d979ca38ebc40996e27fec623a9bb52866246115ea Nov 26 17:17:29 crc kubenswrapper[5010]: I1126 17:17:29.507872 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-e014-account-create-update-gcfkg"] Nov 26 17:17:29 crc kubenswrapper[5010]: W1126 17:17:29.510810 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83d1625c_1b09_40b6_8c61_dd86d17becf1.slice/crio-8050744c8a50fa3f193e7637e65a3842f092f8cd08ca57f8f7caa320ba95a65f WatchSource:0}: Error finding container 8050744c8a50fa3f193e7637e65a3842f092f8cd08ca57f8f7caa320ba95a65f: Status 404 returned error can't find the container with id 8050744c8a50fa3f193e7637e65a3842f092f8cd08ca57f8f7caa320ba95a65f Nov 26 17:17:30 crc kubenswrapper[5010]: I1126 17:17:30.218270 5010 generic.go:334] "Generic (PLEG): container finished" podID="83d1625c-1b09-40b6-8c61-dd86d17becf1" containerID="664c8520987d7ea21f3cd43367fe35690af16d17ac311f390b0db78e51cc475f" exitCode=0 Nov 26 17:17:30 crc kubenswrapper[5010]: I1126 17:17:30.218370 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-e014-account-create-update-gcfkg" event={"ID":"83d1625c-1b09-40b6-8c61-dd86d17becf1","Type":"ContainerDied","Data":"664c8520987d7ea21f3cd43367fe35690af16d17ac311f390b0db78e51cc475f"} Nov 26 17:17:30 crc kubenswrapper[5010]: I1126 17:17:30.218669 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-e014-account-create-update-gcfkg" event={"ID":"83d1625c-1b09-40b6-8c61-dd86d17becf1","Type":"ContainerStarted","Data":"8050744c8a50fa3f193e7637e65a3842f092f8cd08ca57f8f7caa320ba95a65f"} Nov 26 17:17:30 crc kubenswrapper[5010]: I1126 17:17:30.220454 5010 generic.go:334] "Generic (PLEG): container finished" podID="6697bc34-58f2-4daf-b940-1d78a44566e4" containerID="905a06598263d807dec2785e9f03a9e51270d15ca49adda007cc6497e3aec5af" exitCode=0 Nov 26 17:17:30 crc kubenswrapper[5010]: I1126 17:17:30.220477 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kkbjt" event={"ID":"6697bc34-58f2-4daf-b940-1d78a44566e4","Type":"ContainerDied","Data":"905a06598263d807dec2785e9f03a9e51270d15ca49adda007cc6497e3aec5af"} Nov 26 17:17:30 crc kubenswrapper[5010]: I1126 17:17:30.220499 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kkbjt" event={"ID":"6697bc34-58f2-4daf-b940-1d78a44566e4","Type":"ContainerStarted","Data":"ef7f0e6a5fbc224ae1bbb0d979ca38ebc40996e27fec623a9bb52866246115ea"} Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.801039 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.807254 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.915791 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ctn84\" (UniqueName: \"kubernetes.io/projected/83d1625c-1b09-40b6-8c61-dd86d17becf1-kube-api-access-ctn84\") pod \"83d1625c-1b09-40b6-8c61-dd86d17becf1\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.915916 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83d1625c-1b09-40b6-8c61-dd86d17becf1-operator-scripts\") pod \"83d1625c-1b09-40b6-8c61-dd86d17becf1\" (UID: \"83d1625c-1b09-40b6-8c61-dd86d17becf1\") " Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.916028 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nnj56\" (UniqueName: \"kubernetes.io/projected/6697bc34-58f2-4daf-b940-1d78a44566e4-kube-api-access-nnj56\") pod \"6697bc34-58f2-4daf-b940-1d78a44566e4\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.916189 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6697bc34-58f2-4daf-b940-1d78a44566e4-operator-scripts\") pod \"6697bc34-58f2-4daf-b940-1d78a44566e4\" (UID: \"6697bc34-58f2-4daf-b940-1d78a44566e4\") " Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.917002 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d1625c-1b09-40b6-8c61-dd86d17becf1-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "83d1625c-1b09-40b6-8c61-dd86d17becf1" (UID: "83d1625c-1b09-40b6-8c61-dd86d17becf1"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.917184 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6697bc34-58f2-4daf-b940-1d78a44566e4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6697bc34-58f2-4daf-b940-1d78a44566e4" (UID: "6697bc34-58f2-4daf-b940-1d78a44566e4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.923338 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6697bc34-58f2-4daf-b940-1d78a44566e4-kube-api-access-nnj56" (OuterVolumeSpecName: "kube-api-access-nnj56") pod "6697bc34-58f2-4daf-b940-1d78a44566e4" (UID: "6697bc34-58f2-4daf-b940-1d78a44566e4"). InnerVolumeSpecName "kube-api-access-nnj56". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:17:31 crc kubenswrapper[5010]: I1126 17:17:31.928038 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d1625c-1b09-40b6-8c61-dd86d17becf1-kube-api-access-ctn84" (OuterVolumeSpecName: "kube-api-access-ctn84") pod "83d1625c-1b09-40b6-8c61-dd86d17becf1" (UID: "83d1625c-1b09-40b6-8c61-dd86d17becf1"). InnerVolumeSpecName "kube-api-access-ctn84". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.018402 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ctn84\" (UniqueName: \"kubernetes.io/projected/83d1625c-1b09-40b6-8c61-dd86d17becf1-kube-api-access-ctn84\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.018441 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/83d1625c-1b09-40b6-8c61-dd86d17becf1-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.018453 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nnj56\" (UniqueName: \"kubernetes.io/projected/6697bc34-58f2-4daf-b940-1d78a44566e4-kube-api-access-nnj56\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.018465 5010 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6697bc34-58f2-4daf-b940-1d78a44566e4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.248617 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-e014-account-create-update-gcfkg" event={"ID":"83d1625c-1b09-40b6-8c61-dd86d17becf1","Type":"ContainerDied","Data":"8050744c8a50fa3f193e7637e65a3842f092f8cd08ca57f8f7caa320ba95a65f"} Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.248659 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8050744c8a50fa3f193e7637e65a3842f092f8cd08ca57f8f7caa320ba95a65f" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.248747 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-e014-account-create-update-gcfkg" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.251892 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-kkbjt" event={"ID":"6697bc34-58f2-4daf-b940-1d78a44566e4","Type":"ContainerDied","Data":"ef7f0e6a5fbc224ae1bbb0d979ca38ebc40996e27fec623a9bb52866246115ea"} Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.251940 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-kkbjt" Nov 26 17:17:32 crc kubenswrapper[5010]: I1126 17:17:32.251952 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef7f0e6a5fbc224ae1bbb0d979ca38ebc40996e27fec623a9bb52866246115ea" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.822752 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-rvmvt"] Nov 26 17:17:33 crc kubenswrapper[5010]: E1126 17:17:33.823604 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d1625c-1b09-40b6-8c61-dd86d17becf1" containerName="mariadb-account-create-update" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.823623 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d1625c-1b09-40b6-8c61-dd86d17becf1" containerName="mariadb-account-create-update" Nov 26 17:17:33 crc kubenswrapper[5010]: E1126 17:17:33.823655 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6697bc34-58f2-4daf-b940-1d78a44566e4" containerName="mariadb-database-create" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.823664 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6697bc34-58f2-4daf-b940-1d78a44566e4" containerName="mariadb-database-create" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.823928 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="83d1625c-1b09-40b6-8c61-dd86d17becf1" containerName="mariadb-account-create-update" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.823943 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6697bc34-58f2-4daf-b940-1d78a44566e4" containerName="mariadb-database-create" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.824791 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.826887 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.827464 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.827543 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.829172 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kt4xm" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.836142 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-rvmvt"] Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.959698 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-scripts\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.960165 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-config-data\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.960302 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-combined-ca-bundle\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:33 crc kubenswrapper[5010]: I1126 17:17:33.960354 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c22gr\" (UniqueName: \"kubernetes.io/projected/642a032a-cc39-475a-87b2-0d5d25c24b04-kube-api-access-c22gr\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.062120 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c22gr\" (UniqueName: \"kubernetes.io/projected/642a032a-cc39-475a-87b2-0d5d25c24b04-kube-api-access-c22gr\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.062245 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-scripts\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.062357 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-config-data\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: 
I1126 17:17:34.062392 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-combined-ca-bundle\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.068088 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-scripts\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.068553 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-config-data\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.074730 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-combined-ca-bundle\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.079065 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c22gr\" (UniqueName: \"kubernetes.io/projected/642a032a-cc39-475a-87b2-0d5d25c24b04-kube-api-access-c22gr\") pod \"aodh-db-sync-rvmvt\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.159073 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:34 crc kubenswrapper[5010]: I1126 17:17:34.667125 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-rvmvt"] Nov 26 17:17:34 crc kubenswrapper[5010]: W1126 17:17:34.672339 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod642a032a_cc39_475a_87b2_0d5d25c24b04.slice/crio-3b14a4854d5b614c9b2e6624d56ac87837e2ba05fb957639e14310b1921bcb43 WatchSource:0}: Error finding container 3b14a4854d5b614c9b2e6624d56ac87837e2ba05fb957639e14310b1921bcb43: Status 404 returned error can't find the container with id 3b14a4854d5b614c9b2e6624d56ac87837e2ba05fb957639e14310b1921bcb43 Nov 26 17:17:35 crc kubenswrapper[5010]: I1126 17:17:35.285997 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-rvmvt" event={"ID":"642a032a-cc39-475a-87b2-0d5d25c24b04","Type":"ContainerStarted","Data":"3b14a4854d5b614c9b2e6624d56ac87837e2ba05fb957639e14310b1921bcb43"} Nov 26 17:17:40 crc kubenswrapper[5010]: I1126 17:17:40.353876 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-rvmvt" event={"ID":"642a032a-cc39-475a-87b2-0d5d25c24b04","Type":"ContainerStarted","Data":"4e00a440f8277f4633ed7bf67fd56c380b77c9d990870016072e44a7b68c2458"} Nov 26 17:17:40 crc kubenswrapper[5010]: I1126 17:17:40.374139 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-rvmvt" podStartSLOduration=2.850625179 podStartE2EDuration="7.374120164s" podCreationTimestamp="2025-11-26 17:17:33 +0000 UTC" firstStartedPulling="2025-11-26 17:17:34.68311733 +0000 UTC m=+6675.473834488" lastFinishedPulling="2025-11-26 17:17:39.206612325 +0000 UTC m=+6679.997329473" observedRunningTime="2025-11-26 17:17:40.367993112 +0000 UTC m=+6681.158710270" watchObservedRunningTime="2025-11-26 17:17:40.374120164 +0000 UTC m=+6681.164837312" Nov 26 17:17:42 crc kubenswrapper[5010]: I1126 17:17:42.382022 5010 generic.go:334] "Generic (PLEG): container finished" podID="642a032a-cc39-475a-87b2-0d5d25c24b04" containerID="4e00a440f8277f4633ed7bf67fd56c380b77c9d990870016072e44a7b68c2458" exitCode=0 Nov 26 17:17:42 crc kubenswrapper[5010]: I1126 17:17:42.382117 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-rvmvt" event={"ID":"642a032a-cc39-475a-87b2-0d5d25c24b04","Type":"ContainerDied","Data":"4e00a440f8277f4633ed7bf67fd56c380b77c9d990870016072e44a7b68c2458"} Nov 26 17:17:43 crc kubenswrapper[5010]: I1126 17:17:43.875893 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.017974 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c22gr\" (UniqueName: \"kubernetes.io/projected/642a032a-cc39-475a-87b2-0d5d25c24b04-kube-api-access-c22gr\") pod \"642a032a-cc39-475a-87b2-0d5d25c24b04\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.018094 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-scripts\") pod \"642a032a-cc39-475a-87b2-0d5d25c24b04\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.018118 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-combined-ca-bundle\") pod \"642a032a-cc39-475a-87b2-0d5d25c24b04\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.018231 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-config-data\") pod \"642a032a-cc39-475a-87b2-0d5d25c24b04\" (UID: \"642a032a-cc39-475a-87b2-0d5d25c24b04\") " Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.023754 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/642a032a-cc39-475a-87b2-0d5d25c24b04-kube-api-access-c22gr" (OuterVolumeSpecName: "kube-api-access-c22gr") pod "642a032a-cc39-475a-87b2-0d5d25c24b04" (UID: "642a032a-cc39-475a-87b2-0d5d25c24b04"). InnerVolumeSpecName "kube-api-access-c22gr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.025908 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-scripts" (OuterVolumeSpecName: "scripts") pod "642a032a-cc39-475a-87b2-0d5d25c24b04" (UID: "642a032a-cc39-475a-87b2-0d5d25c24b04"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.052941 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-config-data" (OuterVolumeSpecName: "config-data") pod "642a032a-cc39-475a-87b2-0d5d25c24b04" (UID: "642a032a-cc39-475a-87b2-0d5d25c24b04"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.058802 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "642a032a-cc39-475a-87b2-0d5d25c24b04" (UID: "642a032a-cc39-475a-87b2-0d5d25c24b04"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.120962 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c22gr\" (UniqueName: \"kubernetes.io/projected/642a032a-cc39-475a-87b2-0d5d25c24b04-kube-api-access-c22gr\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.120991 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.121001 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.121009 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/642a032a-cc39-475a-87b2-0d5d25c24b04-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.362037 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.417236 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-rvmvt" event={"ID":"642a032a-cc39-475a-87b2-0d5d25c24b04","Type":"ContainerDied","Data":"3b14a4854d5b614c9b2e6624d56ac87837e2ba05fb957639e14310b1921bcb43"} Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.417303 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b14a4854d5b614c9b2e6624d56ac87837e2ba05fb957639e14310b1921bcb43" Nov 26 17:17:44 crc kubenswrapper[5010]: I1126 17:17:44.417309 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-rvmvt" Nov 26 17:17:48 crc kubenswrapper[5010]: I1126 17:17:48.745047 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:17:48 crc kubenswrapper[5010]: I1126 17:17:48.746857 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="614425eb-8ee8-405f-a428-d98ded958f1a" containerName="kube-state-metrics" containerID="cri-o://979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79" gracePeriod=30 Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.052895 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 26 17:17:49 crc kubenswrapper[5010]: E1126 17:17:49.053669 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="642a032a-cc39-475a-87b2-0d5d25c24b04" containerName="aodh-db-sync" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.053686 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="642a032a-cc39-475a-87b2-0d5d25c24b04" containerName="aodh-db-sync" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.054009 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="642a032a-cc39-475a-87b2-0d5d25c24b04" containerName="aodh-db-sync" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.077581 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.077690 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.081187 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.081808 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kt4xm" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.087195 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.131433 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-combined-ca-bundle\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.131931 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhgsq\" (UniqueName: \"kubernetes.io/projected/304dbb93-0877-42a8-87f9-61e3fcf5bbca-kube-api-access-mhgsq\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.132161 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.132241 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-scripts\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.234556 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.234655 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-scripts\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.234724 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-combined-ca-bundle\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.234787 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhgsq\" (UniqueName: \"kubernetes.io/projected/304dbb93-0877-42a8-87f9-61e3fcf5bbca-kube-api-access-mhgsq\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.241243 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-scripts\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.241789 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.249347 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-combined-ca-bundle\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.266306 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhgsq\" (UniqueName: \"kubernetes.io/projected/304dbb93-0877-42a8-87f9-61e3fcf5bbca-kube-api-access-mhgsq\") pod \"aodh-0\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.374560 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.407883 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.438279 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rt4lp\" (UniqueName: \"kubernetes.io/projected/614425eb-8ee8-405f-a428-d98ded958f1a-kube-api-access-rt4lp\") pod \"614425eb-8ee8-405f-a428-d98ded958f1a\" (UID: \"614425eb-8ee8-405f-a428-d98ded958f1a\") " Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.441304 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/614425eb-8ee8-405f-a428-d98ded958f1a-kube-api-access-rt4lp" (OuterVolumeSpecName: "kube-api-access-rt4lp") pod "614425eb-8ee8-405f-a428-d98ded958f1a" (UID: "614425eb-8ee8-405f-a428-d98ded958f1a"). InnerVolumeSpecName "kube-api-access-rt4lp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.473201 5010 generic.go:334] "Generic (PLEG): container finished" podID="614425eb-8ee8-405f-a428-d98ded958f1a" containerID="979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79" exitCode=2 Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.473243 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"614425eb-8ee8-405f-a428-d98ded958f1a","Type":"ContainerDied","Data":"979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79"} Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.473269 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"614425eb-8ee8-405f-a428-d98ded958f1a","Type":"ContainerDied","Data":"af4c0c9f96e9644d6f21b757e404813ec58d9ef922363a2051c0fce48caa2e42"} Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.473285 5010 scope.go:117] "RemoveContainer" containerID="979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.473412 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.541215 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rt4lp\" (UniqueName: \"kubernetes.io/projected/614425eb-8ee8-405f-a428-d98ded958f1a-kube-api-access-rt4lp\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.554424 5010 scope.go:117] "RemoveContainer" containerID="979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79" Nov 26 17:17:49 crc kubenswrapper[5010]: E1126 17:17:49.558956 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79\": container with ID starting with 979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79 not found: ID does not exist" containerID="979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.559018 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79"} err="failed to get container status \"979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79\": rpc error: code = NotFound desc = could not find container \"979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79\": container with ID starting with 979c7f215810e0ac2715f9638eed80a782f12f0299f402a0dc3364c0a2d67a79 not found: ID does not exist" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.559064 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.583783 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.595773 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:17:49 crc kubenswrapper[5010]: E1126 17:17:49.596254 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="614425eb-8ee8-405f-a428-d98ded958f1a" containerName="kube-state-metrics" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.596268 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="614425eb-8ee8-405f-a428-d98ded958f1a" containerName="kube-state-metrics" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.596484 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="614425eb-8ee8-405f-a428-d98ded958f1a" containerName="kube-state-metrics" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.597272 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.602135 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.602329 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.608210 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.745289 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.745367 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.745442 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vckz5\" (UniqueName: \"kubernetes.io/projected/88053629-842a-4282-b167-0a985ca95b54-kube-api-access-vckz5\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.745537 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.847450 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.847581 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.848370 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.848460 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vckz5\" 
(UniqueName: \"kubernetes.io/projected/88053629-842a-4282-b167-0a985ca95b54-kube-api-access-vckz5\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.851699 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.852764 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.855432 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88053629-842a-4282-b167-0a985ca95b54-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.869170 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vckz5\" (UniqueName: \"kubernetes.io/projected/88053629-842a-4282-b167-0a985ca95b54-kube-api-access-vckz5\") pod \"kube-state-metrics-0\" (UID: \"88053629-842a-4282-b167-0a985ca95b54\") " pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.905297 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="614425eb-8ee8-405f-a428-d98ded958f1a" path="/var/lib/kubelet/pods/614425eb-8ee8-405f-a428-d98ded958f1a/volumes" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.930616 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 26 17:17:49 crc kubenswrapper[5010]: I1126 17:17:49.989142 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 17:17:50 crc kubenswrapper[5010]: I1126 17:17:50.428682 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 26 17:17:50 crc kubenswrapper[5010]: W1126 17:17:50.446471 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88053629_842a_4282_b167_0a985ca95b54.slice/crio-578d84b5d9e0c27177bb7db4eed363d13b8c284894f7a4a5de0e84dab8b9c655 WatchSource:0}: Error finding container 578d84b5d9e0c27177bb7db4eed363d13b8c284894f7a4a5de0e84dab8b9c655: Status 404 returned error can't find the container with id 578d84b5d9e0c27177bb7db4eed363d13b8c284894f7a4a5de0e84dab8b9c655 Nov 26 17:17:50 crc kubenswrapper[5010]: I1126 17:17:50.515834 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerStarted","Data":"137dbc535c13c926b9ea1428c01d8729b94196b67d445ad0797c845eb18e1d52"} Nov 26 17:17:50 crc kubenswrapper[5010]: I1126 17:17:50.522468 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"88053629-842a-4282-b167-0a985ca95b54","Type":"ContainerStarted","Data":"578d84b5d9e0c27177bb7db4eed363d13b8c284894f7a4a5de0e84dab8b9c655"} Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.413189 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.414143 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-central-agent" containerID="cri-o://d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4" gracePeriod=30 Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.414761 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="proxy-httpd" containerID="cri-o://1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d" gracePeriod=30 Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.414822 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="sg-core" containerID="cri-o://16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f" gracePeriod=30 Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.414864 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-notification-agent" containerID="cri-o://d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc" gracePeriod=30 Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.537494 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerStarted","Data":"6449e48989147c3fe1745e269f01c58c095b0851a80395cd4e1ed29a381df9e6"} Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.539482 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"88053629-842a-4282-b167-0a985ca95b54","Type":"ContainerStarted","Data":"795aeeebc03257fc5ac0f41ec1cf3e909c5a53895913de3d664895b44804c568"} Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.541923 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 26 17:17:51 crc kubenswrapper[5010]: I1126 17:17:51.583097 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.218459352 podStartE2EDuration="2.583073598s" podCreationTimestamp="2025-11-26 17:17:49 +0000 UTC" firstStartedPulling="2025-11-26 17:17:50.44939511 +0000 UTC m=+6691.240112258" lastFinishedPulling="2025-11-26 17:17:50.814009356 +0000 UTC m=+6691.604726504" observedRunningTime="2025-11-26 17:17:51.55822354 +0000 UTC m=+6692.348940698" watchObservedRunningTime="2025-11-26 17:17:51.583073598 +0000 UTC m=+6692.373790746" Nov 26 17:17:51 crc kubenswrapper[5010]: E1126 17:17:51.596097 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74fe8866_bdcf_4890_96a4_6494b5c64866.slice/crio-1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:17:52 crc kubenswrapper[5010]: I1126 17:17:52.552332 5010 generic.go:334] "Generic (PLEG): container finished" podID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerID="1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d" exitCode=0 Nov 26 17:17:52 crc kubenswrapper[5010]: I1126 17:17:52.552981 5010 generic.go:334] "Generic (PLEG): container finished" podID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerID="16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f" exitCode=2 Nov 26 17:17:52 crc kubenswrapper[5010]: I1126 17:17:52.552416 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerDied","Data":"1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d"} Nov 26 17:17:52 crc kubenswrapper[5010]: I1126 17:17:52.553039 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerDied","Data":"16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f"} Nov 26 17:17:52 crc kubenswrapper[5010]: I1126 17:17:52.553056 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerDied","Data":"d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4"} Nov 26 17:17:52 crc kubenswrapper[5010]: I1126 17:17:52.552996 5010 generic.go:334] "Generic (PLEG): container finished" podID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerID="d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4" exitCode=0 Nov 26 17:17:53 crc kubenswrapper[5010]: I1126 17:17:53.403665 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 26 17:17:53 crc kubenswrapper[5010]: I1126 17:17:53.563366 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerStarted","Data":"a203b4e22ad07366684f61421f94f25270925fa7a46a7b34a3385840e91e1a62"} Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.576357 5010 util.go:48] "No ready sandbox for pod can 
be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.578846 5010 generic.go:334] "Generic (PLEG): container finished" podID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerID="d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc" exitCode=0 Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.578896 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerDied","Data":"d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc"} Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.578927 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"74fe8866-bdcf-4890-96a4-6494b5c64866","Type":"ContainerDied","Data":"fcff79e1a31bc1451343e775a33203cd70385458f079b84e35e8ba8633d7d3a3"} Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.578948 5010 scope.go:117] "RemoveContainer" containerID="1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.649401 5010 scope.go:117] "RemoveContainer" containerID="16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.679996 5010 scope.go:117] "RemoveContainer" containerID="d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.696851 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-sg-core-conf-yaml\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.697050 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-combined-ca-bundle\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.697149 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-config-data\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.697180 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-scripts\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.697324 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-run-httpd\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.697355 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zwv8\" (UniqueName: \"kubernetes.io/projected/74fe8866-bdcf-4890-96a4-6494b5c64866-kube-api-access-7zwv8\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: 
\"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.697378 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-log-httpd\") pod \"74fe8866-bdcf-4890-96a4-6494b5c64866\" (UID: \"74fe8866-bdcf-4890-96a4-6494b5c64866\") " Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.698183 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.699026 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.704797 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74fe8866-bdcf-4890-96a4-6494b5c64866-kube-api-access-7zwv8" (OuterVolumeSpecName: "kube-api-access-7zwv8") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "kube-api-access-7zwv8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.707127 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-scripts" (OuterVolumeSpecName: "scripts") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.713331 5010 scope.go:117] "RemoveContainer" containerID="d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.801127 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.801162 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zwv8\" (UniqueName: \"kubernetes.io/projected/74fe8866-bdcf-4890-96a4-6494b5c64866-kube-api-access-7zwv8\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.801174 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/74fe8866-bdcf-4890-96a4-6494b5c64866-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.801182 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.806521 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.847937 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.848145 5010 scope.go:117] "RemoveContainer" containerID="1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d" Nov 26 17:17:54 crc kubenswrapper[5010]: E1126 17:17:54.848624 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d\": container with ID starting with 1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d not found: ID does not exist" containerID="1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.848659 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d"} err="failed to get container status \"1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d\": rpc error: code = NotFound desc = could not find container \"1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d\": container with ID starting with 1d73070fd7e75fc9e648fa9111cbe1673966c36d75f5b01fd35f40bb7083119d not found: ID does not exist" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.848679 5010 scope.go:117] "RemoveContainer" containerID="16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f" Nov 26 17:17:54 crc kubenswrapper[5010]: E1126 17:17:54.849512 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f\": container with ID starting with 16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f not found: ID does not exist" containerID="16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.849533 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f"} err="failed to get container status \"16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f\": rpc error: code = NotFound desc = could not find container \"16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f\": container with ID starting with 16560cee077ba7c2c242ddca01e8539cef65f0439c3b2d74e266554ebf4b5f5f not found: ID does not exist" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.849546 5010 scope.go:117] "RemoveContainer" containerID="d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc" Nov 26 17:17:54 crc kubenswrapper[5010]: E1126 17:17:54.849955 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc\": container with ID starting with d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc not found: ID does not exist" containerID="d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.849975 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc"} err="failed to get container status \"d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc\": rpc error: code = NotFound desc = could not 
find container \"d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc\": container with ID starting with d9ec5bd75ef97c776bc2f8610ef96a6af0d38eaafdd79a4c81ded5e2bd7579cc not found: ID does not exist" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.849988 5010 scope.go:117] "RemoveContainer" containerID="d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4" Nov 26 17:17:54 crc kubenswrapper[5010]: E1126 17:17:54.850487 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4\": container with ID starting with d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4 not found: ID does not exist" containerID="d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.850503 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4"} err="failed to get container status \"d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4\": rpc error: code = NotFound desc = could not find container \"d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4\": container with ID starting with d905afc4f813d5abab9b8542703d92f9f87e3d68a3c2b866fcbe205a510e07f4 not found: ID does not exist" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.870931 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-config-data" (OuterVolumeSpecName: "config-data") pod "74fe8866-bdcf-4890-96a4-6494b5c64866" (UID: "74fe8866-bdcf-4890-96a4-6494b5c64866"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.903176 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.903205 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:54 crc kubenswrapper[5010]: I1126 17:17:54.903217 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/74fe8866-bdcf-4890-96a4-6494b5c64866-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.591984 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerStarted","Data":"279ba6ed73948cd811917f9b541a7801fad85f1fe12d899ef2cb006180895dc3"} Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.593990 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.633135 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.648349 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.663912 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:55 crc kubenswrapper[5010]: E1126 17:17:55.664506 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="proxy-httpd" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.664535 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="proxy-httpd" Nov 26 17:17:55 crc kubenswrapper[5010]: E1126 17:17:55.664561 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-notification-agent" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.664573 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-notification-agent" Nov 26 17:17:55 crc kubenswrapper[5010]: E1126 17:17:55.664613 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-central-agent" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.664626 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-central-agent" Nov 26 17:17:55 crc kubenswrapper[5010]: E1126 17:17:55.664654 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="sg-core" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.664665 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="sg-core" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.665046 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="sg-core" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.665078 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-central-agent" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.665117 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="proxy-httpd" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.665134 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" containerName="ceilometer-notification-agent" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.667870 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.673467 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.673561 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.673650 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.678074 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.746317 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:55 crc kubenswrapper[5010]: E1126 17:17:55.747269 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-2kf94 log-httpd run-httpd scripts sg-core-conf-yaml], unattached volumes=[], failed to process volumes=[ceilometer-tls-certs combined-ca-bundle config-data kube-api-access-2kf94 log-httpd run-httpd scripts sg-core-conf-yaml]: context canceled" pod="openstack/ceilometer-0" podUID="47b15000-c67d-4709-b2a6-44cd14ef11ec" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834280 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-config-data\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834332 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kf94\" (UniqueName: \"kubernetes.io/projected/47b15000-c67d-4709-b2a6-44cd14ef11ec-kube-api-access-2kf94\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834360 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834728 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834861 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-scripts\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834895 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-ceilometer-tls-certs\") pod 
\"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834921 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-log-httpd\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.834939 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-run-httpd\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.903680 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74fe8866-bdcf-4890-96a4-6494b5c64866" path="/var/lib/kubelet/pods/74fe8866-bdcf-4890-96a4-6494b5c64866/volumes" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.936911 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.936965 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-scripts\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.936991 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.937013 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-run-httpd\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.937032 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-log-httpd\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.937158 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-config-data\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.937183 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kf94\" (UniqueName: \"kubernetes.io/projected/47b15000-c67d-4709-b2a6-44cd14ef11ec-kube-api-access-2kf94\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " 
pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.937209 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.938212 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-log-httpd\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.938359 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-run-httpd\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.942144 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-config-data\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.944356 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.944474 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-scripts\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.949601 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.953570 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:55 crc kubenswrapper[5010]: I1126 17:17:55.953880 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kf94\" (UniqueName: \"kubernetes.io/projected/47b15000-c67d-4709-b2a6-44cd14ef11ec-kube-api-access-2kf94\") pod \"ceilometer-0\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " pod="openstack/ceilometer-0" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.603161 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.625895 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.754628 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kf94\" (UniqueName: \"kubernetes.io/projected/47b15000-c67d-4709-b2a6-44cd14ef11ec-kube-api-access-2kf94\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.754760 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-sg-core-conf-yaml\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.754883 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-config-data\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.754926 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-log-httpd\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.754943 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-scripts\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.754986 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-ceilometer-tls-certs\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.755007 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-run-httpd\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.755067 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-combined-ca-bundle\") pod \"47b15000-c67d-4709-b2a6-44cd14ef11ec\" (UID: \"47b15000-c67d-4709-b2a6-44cd14ef11ec\") " Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.755744 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.756276 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.760049 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.760822 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.760849 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-config-data" (OuterVolumeSpecName: "config-data") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.762031 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.763907 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47b15000-c67d-4709-b2a6-44cd14ef11ec-kube-api-access-2kf94" (OuterVolumeSpecName: "kube-api-access-2kf94") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "kube-api-access-2kf94". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.772785 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-scripts" (OuterVolumeSpecName: "scripts") pod "47b15000-c67d-4709-b2a6-44cd14ef11ec" (UID: "47b15000-c67d-4709-b2a6-44cd14ef11ec"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.856974 5010 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857010 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857020 5010 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857031 5010 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/47b15000-c67d-4709-b2a6-44cd14ef11ec-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857040 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857048 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kf94\" (UniqueName: \"kubernetes.io/projected/47b15000-c67d-4709-b2a6-44cd14ef11ec-kube-api-access-2kf94\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857057 5010 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:56 crc kubenswrapper[5010]: I1126 17:17:56.857064 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47b15000-c67d-4709-b2a6-44cd14ef11ec-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.621805 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.735850 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.749539 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.760500 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.762912 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.766234 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.766380 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.766390 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.772820 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.901541 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.901827 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7045bdf2-1e74-43b5-9568-895046c3b8b2-log-httpd\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.901893 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.901910 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7045bdf2-1e74-43b5-9568-895046c3b8b2-run-httpd\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.901938 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.901996 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-scripts\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.902020 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbntm\" (UniqueName: \"kubernetes.io/projected/7045bdf2-1e74-43b5-9568-895046c3b8b2-kube-api-access-lbntm\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.902076 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-config-data\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:57 crc kubenswrapper[5010]: I1126 17:17:57.902897 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47b15000-c67d-4709-b2a6-44cd14ef11ec" path="/var/lib/kubelet/pods/47b15000-c67d-4709-b2a6-44cd14ef11ec/volumes" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003636 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-scripts\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003684 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbntm\" (UniqueName: \"kubernetes.io/projected/7045bdf2-1e74-43b5-9568-895046c3b8b2-kube-api-access-lbntm\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003790 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-config-data\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003841 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003865 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7045bdf2-1e74-43b5-9568-895046c3b8b2-log-httpd\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003933 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003949 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7045bdf2-1e74-43b5-9568-895046c3b8b2-run-httpd\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.003973 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.005152 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7045bdf2-1e74-43b5-9568-895046c3b8b2-run-httpd\") pod \"ceilometer-0\" (UID: 
\"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.005291 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7045bdf2-1e74-43b5-9568-895046c3b8b2-log-httpd\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.010279 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.011082 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.012235 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.013408 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-config-data\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.013642 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7045bdf2-1e74-43b5-9568-895046c3b8b2-scripts\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.027432 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lbntm\" (UniqueName: \"kubernetes.io/projected/7045bdf2-1e74-43b5-9568-895046c3b8b2-kube-api-access-lbntm\") pod \"ceilometer-0\" (UID: \"7045bdf2-1e74-43b5-9568-895046c3b8b2\") " pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.241254 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.635554 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerStarted","Data":"15984535f5b1be2bbdeeef33e6c11d7c02a603059c8c3f0d5a70e5c15ef05e3c"} Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.635766 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-api" containerID="cri-o://6449e48989147c3fe1745e269f01c58c095b0851a80395cd4e1ed29a381df9e6" gracePeriod=30 Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.635896 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-evaluator" containerID="cri-o://a203b4e22ad07366684f61421f94f25270925fa7a46a7b34a3385840e91e1a62" gracePeriod=30 Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.635835 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-notifier" containerID="cri-o://279ba6ed73948cd811917f9b541a7801fad85f1fe12d899ef2cb006180895dc3" gracePeriod=30 Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.635896 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-listener" containerID="cri-o://15984535f5b1be2bbdeeef33e6c11d7c02a603059c8c3f0d5a70e5c15ef05e3c" gracePeriod=30 Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.669091 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.943902375 podStartE2EDuration="9.669068717s" podCreationTimestamp="2025-11-26 17:17:49 +0000 UTC" firstStartedPulling="2025-11-26 17:17:50.020621778 +0000 UTC m=+6690.811338926" lastFinishedPulling="2025-11-26 17:17:57.74578812 +0000 UTC m=+6698.536505268" observedRunningTime="2025-11-26 17:17:58.663118109 +0000 UTC m=+6699.453835267" watchObservedRunningTime="2025-11-26 17:17:58.669068717 +0000 UTC m=+6699.459785875" Nov 26 17:17:58 crc kubenswrapper[5010]: I1126 17:17:58.764245 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 26 17:17:59 crc kubenswrapper[5010]: I1126 17:17:59.648360 5010 generic.go:334] "Generic (PLEG): container finished" podID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerID="a203b4e22ad07366684f61421f94f25270925fa7a46a7b34a3385840e91e1a62" exitCode=0 Nov 26 17:17:59 crc kubenswrapper[5010]: I1126 17:17:59.648681 5010 generic.go:334] "Generic (PLEG): container finished" podID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerID="6449e48989147c3fe1745e269f01c58c095b0851a80395cd4e1ed29a381df9e6" exitCode=0 Nov 26 17:17:59 crc kubenswrapper[5010]: I1126 17:17:59.648482 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerDied","Data":"a203b4e22ad07366684f61421f94f25270925fa7a46a7b34a3385840e91e1a62"} Nov 26 17:17:59 crc kubenswrapper[5010]: I1126 17:17:59.648833 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerDied","Data":"6449e48989147c3fe1745e269f01c58c095b0851a80395cd4e1ed29a381df9e6"} Nov 26 
17:17:59 crc kubenswrapper[5010]: I1126 17:17:59.650256 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7045bdf2-1e74-43b5-9568-895046c3b8b2","Type":"ContainerStarted","Data":"424ba40d7f1aa87d578aeaaac4ae37ec162b070eb6e204d3de2f98cdfe4208cc"} Nov 26 17:17:59 crc kubenswrapper[5010]: I1126 17:17:59.951851 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.069767 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-be3a-account-create-update-lpt74"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.085608 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-kz6ff"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.095586 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-d145-account-create-update-xxj6c"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.107778 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-8l9b6"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.123665 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-kz6ff"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.134199 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-c5dd-account-create-update-n6nql"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.142864 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-be3a-account-create-update-lpt74"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.151582 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-vqcbv"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.160174 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-c5dd-account-create-update-n6nql"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.169211 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-8l9b6"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.178660 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-vqcbv"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.216948 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-d145-account-create-update-xxj6c"] Nov 26 17:18:00 crc kubenswrapper[5010]: I1126 17:18:00.661466 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7045bdf2-1e74-43b5-9568-895046c3b8b2","Type":"ContainerStarted","Data":"313543d72eebe3d2aac2cc3dbd14621c184d8623b00f4459163bc1d666c6b982"} Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.690068 5010 generic.go:334] "Generic (PLEG): container finished" podID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerID="279ba6ed73948cd811917f9b541a7801fad85f1fe12d899ef2cb006180895dc3" exitCode=0 Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.690166 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerDied","Data":"279ba6ed73948cd811917f9b541a7801fad85f1fe12d899ef2cb006180895dc3"} Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.697552 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"7045bdf2-1e74-43b5-9568-895046c3b8b2","Type":"ContainerStarted","Data":"4771d008fff90286e16c31e659a6982cd03b96e01e556a2d9b77055516cff158"} Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.907487 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1370ec66-0402-4b67-acf3-ddeb0c734107" path="/var/lib/kubelet/pods/1370ec66-0402-4b67-acf3-ddeb0c734107/volumes" Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.908148 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47d38398-693a-429e-9941-aff2dd54a904" path="/var/lib/kubelet/pods/47d38398-693a-429e-9941-aff2dd54a904/volumes" Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.908742 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="56a19aa9-1344-4742-9705-0dc8191f47a5" path="/var/lib/kubelet/pods/56a19aa9-1344-4742-9705-0dc8191f47a5/volumes" Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.909339 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82d71aae-da28-4c09-9c6e-f665982ea911" path="/var/lib/kubelet/pods/82d71aae-da28-4c09-9c6e-f665982ea911/volumes" Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.911997 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf8161da-57ca-4a25-928d-ec41f12b6916" path="/var/lib/kubelet/pods/bf8161da-57ca-4a25-928d-ec41f12b6916/volumes" Nov 26 17:18:01 crc kubenswrapper[5010]: I1126 17:18:01.913428 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6" path="/var/lib/kubelet/pods/c7ef6ddb-c6bd-4e0f-8eb1-8ff0fa22c3b6/volumes" Nov 26 17:18:02 crc kubenswrapper[5010]: I1126 17:18:02.757104 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7045bdf2-1e74-43b5-9568-895046c3b8b2","Type":"ContainerStarted","Data":"f22ec2fac54d27e62768efb05302aa89a1e8251c7b7b0f29fc759fbafd5b52e1"} Nov 26 17:18:04 crc kubenswrapper[5010]: I1126 17:18:04.788870 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7045bdf2-1e74-43b5-9568-895046c3b8b2","Type":"ContainerStarted","Data":"af9e2efc476e6f7cdfcf5fae8a522eff4c9a0e0f9728bfb5c18d642710e23c91"} Nov 26 17:18:04 crc kubenswrapper[5010]: I1126 17:18:04.789520 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 26 17:18:09 crc kubenswrapper[5010]: I1126 17:18:09.797139 5010 scope.go:117] "RemoveContainer" containerID="82443df6ef8f7f2a9ad789d44aafa8a0a0cc09b0b64c0b29d03d07129a5fa2e1" Nov 26 17:18:09 crc kubenswrapper[5010]: I1126 17:18:09.825606 5010 scope.go:117] "RemoveContainer" containerID="7d6fefc8f5fe836bcecfccd6737617e015d761e65db290fc3d3b476d48bb961d" Nov 26 17:18:09 crc kubenswrapper[5010]: I1126 17:18:09.892852 5010 scope.go:117] "RemoveContainer" containerID="acaa88689664d9ad22644fadb3acfeb8b28c2521b691fc660611dafc348326a9" Nov 26 17:18:09 crc kubenswrapper[5010]: I1126 17:18:09.944052 5010 scope.go:117] "RemoveContainer" containerID="1a44697e4959368bad2f6f6bacdf084e1515de8b4a8a1aa5d304515f7e152d20" Nov 26 17:18:09 crc kubenswrapper[5010]: I1126 17:18:09.997252 5010 scope.go:117] "RemoveContainer" containerID="ab1efd807dac43def7c48c3441d4a6b75a2322e163e1ab616526468f5b7bf3e1" Nov 26 17:18:10 crc kubenswrapper[5010]: I1126 17:18:10.030027 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=8.228844011 podStartE2EDuration="13.030003579s" 
podCreationTimestamp="2025-11-26 17:17:57 +0000 UTC" firstStartedPulling="2025-11-26 17:17:58.769954735 +0000 UTC m=+6699.560671883" lastFinishedPulling="2025-11-26 17:18:03.571114313 +0000 UTC m=+6704.361831451" observedRunningTime="2025-11-26 17:18:04.815329 +0000 UTC m=+6705.606046188" watchObservedRunningTime="2025-11-26 17:18:10.030003579 +0000 UTC m=+6710.820720727" Nov 26 17:18:10 crc kubenswrapper[5010]: I1126 17:18:10.032490 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-r8hq8"] Nov 26 17:18:10 crc kubenswrapper[5010]: I1126 17:18:10.050939 5010 scope.go:117] "RemoveContainer" containerID="ec47517e8cd43382dbe26789f6aa5ce2cf3f5ccf9508cee8822058294d49f8fa" Nov 26 17:18:10 crc kubenswrapper[5010]: I1126 17:18:10.056741 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-r8hq8"] Nov 26 17:18:11 crc kubenswrapper[5010]: I1126 17:18:11.909033 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dd72f90-6373-4087-9121-8843150bd264" path="/var/lib/kubelet/pods/5dd72f90-6373-4087-9121-8843150bd264/volumes" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.319258 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-jm6xs"] Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.321804 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.379037 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-utilities\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.379111 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-catalog-content\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.379506 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6whfx\" (UniqueName: \"kubernetes.io/projected/90ee0724-a188-437a-9dce-be3903724b53-kube-api-access-6whfx\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.413001 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jm6xs"] Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.488483 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-utilities\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.488606 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-catalog-content\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.489446 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-catalog-content\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.489441 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-utilities\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.489624 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6whfx\" (UniqueName: \"kubernetes.io/projected/90ee0724-a188-437a-9dce-be3903724b53-kube-api-access-6whfx\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.522309 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6whfx\" (UniqueName: \"kubernetes.io/projected/90ee0724-a188-437a-9dce-be3903724b53-kube-api-access-6whfx\") pod \"certified-operators-jm6xs\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:12 crc kubenswrapper[5010]: I1126 17:18:12.687302 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:13 crc kubenswrapper[5010]: I1126 17:18:13.243437 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-jm6xs"] Nov 26 17:18:13 crc kubenswrapper[5010]: I1126 17:18:13.908089 5010 generic.go:334] "Generic (PLEG): container finished" podID="90ee0724-a188-437a-9dce-be3903724b53" containerID="24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c" exitCode=0 Nov 26 17:18:13 crc kubenswrapper[5010]: I1126 17:18:13.908200 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerDied","Data":"24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c"} Nov 26 17:18:13 crc kubenswrapper[5010]: I1126 17:18:13.908614 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerStarted","Data":"55e7fe1703d9edadb1b54f04a7602d6859e6bc3c792c409a55616e08401407dc"} Nov 26 17:18:15 crc kubenswrapper[5010]: I1126 17:18:15.937197 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerStarted","Data":"e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793"} Nov 26 17:18:16 crc kubenswrapper[5010]: I1126 17:18:16.964472 5010 generic.go:334] "Generic (PLEG): container finished" podID="90ee0724-a188-437a-9dce-be3903724b53" containerID="e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793" exitCode=0 Nov 26 17:18:16 crc kubenswrapper[5010]: I1126 17:18:16.964606 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerDied","Data":"e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793"} Nov 26 17:18:19 crc kubenswrapper[5010]: I1126 17:18:19.000038 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerStarted","Data":"1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311"} Nov 26 17:18:19 crc kubenswrapper[5010]: I1126 17:18:19.024917 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-jm6xs" podStartSLOduration=2.5227078670000003 podStartE2EDuration="7.02489467s" podCreationTimestamp="2025-11-26 17:18:12 +0000 UTC" firstStartedPulling="2025-11-26 17:18:13.91031403 +0000 UTC m=+6714.701031178" lastFinishedPulling="2025-11-26 17:18:18.412500823 +0000 UTC m=+6719.203217981" observedRunningTime="2025-11-26 17:18:19.01844 +0000 UTC m=+6719.809157178" watchObservedRunningTime="2025-11-26 17:18:19.02489467 +0000 UTC m=+6719.815611818" Nov 26 17:18:22 crc kubenswrapper[5010]: I1126 17:18:22.698784 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:22 crc kubenswrapper[5010]: I1126 17:18:22.699475 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:22 crc kubenswrapper[5010]: I1126 17:18:22.749791 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:23 crc kubenswrapper[5010]: I1126 17:18:23.112325 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:23 crc kubenswrapper[5010]: I1126 17:18:23.193032 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jm6xs"] Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.069133 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-jm6xs" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="registry-server" containerID="cri-o://1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311" gracePeriod=2 Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.773998 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.842171 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6whfx\" (UniqueName: \"kubernetes.io/projected/90ee0724-a188-437a-9dce-be3903724b53-kube-api-access-6whfx\") pod \"90ee0724-a188-437a-9dce-be3903724b53\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.842247 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-utilities\") pod \"90ee0724-a188-437a-9dce-be3903724b53\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.842324 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-catalog-content\") pod \"90ee0724-a188-437a-9dce-be3903724b53\" (UID: \"90ee0724-a188-437a-9dce-be3903724b53\") " Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.843200 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-utilities" (OuterVolumeSpecName: "utilities") pod "90ee0724-a188-437a-9dce-be3903724b53" (UID: "90ee0724-a188-437a-9dce-be3903724b53"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.848323 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/90ee0724-a188-437a-9dce-be3903724b53-kube-api-access-6whfx" (OuterVolumeSpecName: "kube-api-access-6whfx") pod "90ee0724-a188-437a-9dce-be3903724b53" (UID: "90ee0724-a188-437a-9dce-be3903724b53"). InnerVolumeSpecName "kube-api-access-6whfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.899030 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "90ee0724-a188-437a-9dce-be3903724b53" (UID: "90ee0724-a188-437a-9dce-be3903724b53"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.945874 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.945905 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6whfx\" (UniqueName: \"kubernetes.io/projected/90ee0724-a188-437a-9dce-be3903724b53-kube-api-access-6whfx\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:25 crc kubenswrapper[5010]: I1126 17:18:25.945916 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/90ee0724-a188-437a-9dce-be3903724b53-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.083147 5010 generic.go:334] "Generic (PLEG): container finished" podID="90ee0724-a188-437a-9dce-be3903724b53" containerID="1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311" exitCode=0 Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.083199 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-jm6xs" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.083203 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerDied","Data":"1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311"} Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.083250 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-jm6xs" event={"ID":"90ee0724-a188-437a-9dce-be3903724b53","Type":"ContainerDied","Data":"55e7fe1703d9edadb1b54f04a7602d6859e6bc3c792c409a55616e08401407dc"} Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.083276 5010 scope.go:117] "RemoveContainer" containerID="1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.123052 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-jm6xs"] Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.128282 5010 scope.go:117] "RemoveContainer" containerID="e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.138296 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-jm6xs"] Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.164123 5010 scope.go:117] "RemoveContainer" containerID="24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.225468 5010 scope.go:117] "RemoveContainer" containerID="1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311" Nov 26 17:18:26 crc kubenswrapper[5010]: E1126 17:18:26.226200 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311\": container with ID starting with 1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311 not found: ID does not exist" containerID="1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.226233 
5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311"} err="failed to get container status \"1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311\": rpc error: code = NotFound desc = could not find container \"1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311\": container with ID starting with 1d7c9aac57382d88a192ab1a5f6e047c448e4d80d8a733becc6d42170f683311 not found: ID does not exist" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.226255 5010 scope.go:117] "RemoveContainer" containerID="e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793" Nov 26 17:18:26 crc kubenswrapper[5010]: E1126 17:18:26.226672 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793\": container with ID starting with e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793 not found: ID does not exist" containerID="e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.226701 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793"} err="failed to get container status \"e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793\": rpc error: code = NotFound desc = could not find container \"e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793\": container with ID starting with e3618b4f496da22038dafa320fadc90d7112add399f7058661e8b14f8da0f793 not found: ID does not exist" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.226731 5010 scope.go:117] "RemoveContainer" containerID="24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c" Nov 26 17:18:26 crc kubenswrapper[5010]: E1126 17:18:26.227513 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c\": container with ID starting with 24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c not found: ID does not exist" containerID="24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c" Nov 26 17:18:26 crc kubenswrapper[5010]: I1126 17:18:26.227620 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c"} err="failed to get container status \"24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c\": rpc error: code = NotFound desc = could not find container \"24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c\": container with ID starting with 24ef0904e5a71659896e00d5f2a2ddcaff9dec67abd388e8bb40bb30ca027f5c not found: ID does not exist" Nov 26 17:18:27 crc kubenswrapper[5010]: I1126 17:18:27.906887 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="90ee0724-a188-437a-9dce-be3903724b53" path="/var/lib/kubelet/pods/90ee0724-a188-437a-9dce-be3903724b53/volumes" Nov 26 17:18:28 crc kubenswrapper[5010]: I1126 17:18:28.260654 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.062690 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell1-conductor-db-sync-j2nzv"] Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.094008 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-j2nzv"] Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.131045 5010 generic.go:334] "Generic (PLEG): container finished" podID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerID="15984535f5b1be2bbdeeef33e6c11d7c02a603059c8c3f0d5a70e5c15ef05e3c" exitCode=137 Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.131090 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerDied","Data":"15984535f5b1be2bbdeeef33e6c11d7c02a603059c8c3f0d5a70e5c15ef05e3c"} Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.131114 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"304dbb93-0877-42a8-87f9-61e3fcf5bbca","Type":"ContainerDied","Data":"137dbc535c13c926b9ea1428c01d8729b94196b67d445ad0797c845eb18e1d52"} Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.131124 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="137dbc535c13c926b9ea1428c01d8729b94196b67d445ad0797c845eb18e1d52" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.196434 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.231916 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mhgsq\" (UniqueName: \"kubernetes.io/projected/304dbb93-0877-42a8-87f9-61e3fcf5bbca-kube-api-access-mhgsq\") pod \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.231986 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-scripts\") pod \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.232038 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-combined-ca-bundle\") pod \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.232280 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data\") pod \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.236819 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/304dbb93-0877-42a8-87f9-61e3fcf5bbca-kube-api-access-mhgsq" (OuterVolumeSpecName: "kube-api-access-mhgsq") pod "304dbb93-0877-42a8-87f9-61e3fcf5bbca" (UID: "304dbb93-0877-42a8-87f9-61e3fcf5bbca"). InnerVolumeSpecName "kube-api-access-mhgsq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.250975 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-scripts" (OuterVolumeSpecName: "scripts") pod "304dbb93-0877-42a8-87f9-61e3fcf5bbca" (UID: "304dbb93-0877-42a8-87f9-61e3fcf5bbca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.337383 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mhgsq\" (UniqueName: \"kubernetes.io/projected/304dbb93-0877-42a8-87f9-61e3fcf5bbca-kube-api-access-mhgsq\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.337417 5010 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-scripts\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.410252 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "304dbb93-0877-42a8-87f9-61e3fcf5bbca" (UID: "304dbb93-0877-42a8-87f9-61e3fcf5bbca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.439073 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data" (OuterVolumeSpecName: "config-data") pod "304dbb93-0877-42a8-87f9-61e3fcf5bbca" (UID: "304dbb93-0877-42a8-87f9-61e3fcf5bbca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.439777 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data\") pod \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\" (UID: \"304dbb93-0877-42a8-87f9-61e3fcf5bbca\") " Nov 26 17:18:29 crc kubenswrapper[5010]: W1126 17:18:29.439913 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/304dbb93-0877-42a8-87f9-61e3fcf5bbca/volumes/kubernetes.io~secret/config-data Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.439932 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data" (OuterVolumeSpecName: "config-data") pod "304dbb93-0877-42a8-87f9-61e3fcf5bbca" (UID: "304dbb93-0877-42a8-87f9-61e3fcf5bbca"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.440573 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.440595 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/304dbb93-0877-42a8-87f9-61e3fcf5bbca-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:29 crc kubenswrapper[5010]: I1126 17:18:29.905796 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0c7721f-be82-4859-874e-8e73cad59726" path="/var/lib/kubelet/pods/e0c7721f-be82-4859-874e-8e73cad59726/volumes" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.032806 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-5cwcg"] Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.042917 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-5cwcg"] Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.140766 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.167421 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.179826 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197106 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197626 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-notifier" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197645 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-notifier" Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197677 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="registry-server" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197686 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="registry-server" Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197700 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-listener" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197727 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-listener" Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197770 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-evaluator" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197779 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-evaluator" Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197798 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="extract-content" Nov 26 17:18:30 
crc kubenswrapper[5010]: I1126 17:18:30.197807 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="extract-content" Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197822 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-api" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197830 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-api" Nov 26 17:18:30 crc kubenswrapper[5010]: E1126 17:18:30.197841 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="extract-utilities" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.197849 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="extract-utilities" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.198082 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-notifier" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.198107 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-api" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.198127 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="90ee0724-a188-437a-9dce-be3903724b53" containerName="registry-server" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.198150 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-evaluator" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.198173 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" containerName="aodh-listener" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.200726 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.203973 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.204421 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.204744 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.204898 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.204947 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-kt4xm" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.215445 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.256546 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-config-data\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.256607 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-scripts\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.256626 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klx77\" (UniqueName: \"kubernetes.io/projected/3d638ae6-520f-40ce-af64-abedb51668a6-kube-api-access-klx77\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.256648 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.256733 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-internal-tls-certs\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.256753 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-public-tls-certs\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.358490 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-config-data\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 
17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.358549 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-scripts\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.358573 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klx77\" (UniqueName: \"kubernetes.io/projected/3d638ae6-520f-40ce-af64-abedb51668a6-kube-api-access-klx77\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.358596 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.358661 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-internal-tls-certs\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.358684 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-public-tls-certs\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.364840 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-scripts\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.365344 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-internal-tls-certs\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.365498 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.365498 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-public-tls-certs\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.371008 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d638ae6-520f-40ce-af64-abedb51668a6-config-data\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.375137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-klx77\" (UniqueName: \"kubernetes.io/projected/3d638ae6-520f-40ce-af64-abedb51668a6-kube-api-access-klx77\") pod \"aodh-0\" (UID: \"3d638ae6-520f-40ce-af64-abedb51668a6\") " pod="openstack/aodh-0" Nov 26 17:18:30 crc kubenswrapper[5010]: I1126 17:18:30.559380 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Nov 26 17:18:31 crc kubenswrapper[5010]: W1126 17:18:31.056246 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d638ae6_520f_40ce_af64_abedb51668a6.slice/crio-acb0aead16e949f7d5b782311ad93f058db67a64223fa61bb68f942780ed71b6 WatchSource:0}: Error finding container acb0aead16e949f7d5b782311ad93f058db67a64223fa61bb68f942780ed71b6: Status 404 returned error can't find the container with id acb0aead16e949f7d5b782311ad93f058db67a64223fa61bb68f942780ed71b6 Nov 26 17:18:31 crc kubenswrapper[5010]: I1126 17:18:31.059381 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Nov 26 17:18:31 crc kubenswrapper[5010]: I1126 17:18:31.150152 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3d638ae6-520f-40ce-af64-abedb51668a6","Type":"ContainerStarted","Data":"acb0aead16e949f7d5b782311ad93f058db67a64223fa61bb68f942780ed71b6"} Nov 26 17:18:31 crc kubenswrapper[5010]: I1126 17:18:31.906524 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="304dbb93-0877-42a8-87f9-61e3fcf5bbca" path="/var/lib/kubelet/pods/304dbb93-0877-42a8-87f9-61e3fcf5bbca/volumes" Nov 26 17:18:31 crc kubenswrapper[5010]: I1126 17:18:31.908458 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8775a1b7-7530-4163-9d34-b435a78fe316" path="/var/lib/kubelet/pods/8775a1b7-7530-4163-9d34-b435a78fe316/volumes" Nov 26 17:18:33 crc kubenswrapper[5010]: I1126 17:18:33.175874 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3d638ae6-520f-40ce-af64-abedb51668a6","Type":"ContainerStarted","Data":"ec78475183e62b5c591168815c697dad0db2ffdb8d5f7acb328204ecd71843e1"} Nov 26 17:18:33 crc kubenswrapper[5010]: I1126 17:18:33.176662 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3d638ae6-520f-40ce-af64-abedb51668a6","Type":"ContainerStarted","Data":"6d000070881844ca84e38f705bd047eb2fe73f0c39b882e7650221919610c36c"} Nov 26 17:18:34 crc kubenswrapper[5010]: I1126 17:18:34.197867 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3d638ae6-520f-40ce-af64-abedb51668a6","Type":"ContainerStarted","Data":"f0c93a16e788c7ee708ce9c6f298845601dada66f79777c2b3857ebf733f0575"} Nov 26 17:18:35 crc kubenswrapper[5010]: I1126 17:18:35.207992 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3d638ae6-520f-40ce-af64-abedb51668a6","Type":"ContainerStarted","Data":"e235373efe1e60efe52d253f73512653b7a3e465f612f78a8782d65cf1f1c0f8"} Nov 26 17:18:35 crc kubenswrapper[5010]: I1126 17:18:35.234569 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.7036123779999999 podStartE2EDuration="5.234551002s" podCreationTimestamp="2025-11-26 17:18:30 +0000 UTC" firstStartedPulling="2025-11-26 17:18:31.058445187 +0000 UTC m=+6731.849162335" lastFinishedPulling="2025-11-26 17:18:34.589383801 +0000 UTC m=+6735.380100959" observedRunningTime="2025-11-26 17:18:35.227775284 +0000 UTC m=+6736.018492442" 
watchObservedRunningTime="2025-11-26 17:18:35.234551002 +0000 UTC m=+6736.025268150" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.085900 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6dfd49754f-7bmtn"] Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.088072 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.094415 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.105109 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dfd49754f-7bmtn"] Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.250774 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-dns-svc\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.250855 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-nb\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.250943 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-openstack-cell1\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.250985 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lns8h\" (UniqueName: \"kubernetes.io/projected/62d3894e-761d-4a8d-855f-ced842b8930a-kube-api-access-lns8h\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.251018 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-sb\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.251054 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-config\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.353054 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-dns-svc\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " 
pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.353167 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-nb\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.353274 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-openstack-cell1\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.353355 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lns8h\" (UniqueName: \"kubernetes.io/projected/62d3894e-761d-4a8d-855f-ced842b8930a-kube-api-access-lns8h\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.353440 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-sb\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.353511 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-config\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.354217 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-nb\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.354524 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-openstack-cell1\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.354876 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-sb\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.355121 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-config\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 
17:18:38.355279 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-dns-svc\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.391443 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lns8h\" (UniqueName: \"kubernetes.io/projected/62d3894e-761d-4a8d-855f-ced842b8930a-kube-api-access-lns8h\") pod \"dnsmasq-dns-6dfd49754f-7bmtn\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.419573 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:38 crc kubenswrapper[5010]: I1126 17:18:38.989223 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6dfd49754f-7bmtn"] Nov 26 17:18:39 crc kubenswrapper[5010]: I1126 17:18:39.249218 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" event={"ID":"62d3894e-761d-4a8d-855f-ced842b8930a","Type":"ContainerStarted","Data":"cc918c61f984f958ced79c62bd1e67e2bff0fba2aac20a2bb44cd6792048160a"} Nov 26 17:18:39 crc kubenswrapper[5010]: I1126 17:18:39.249585 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" event={"ID":"62d3894e-761d-4a8d-855f-ced842b8930a","Type":"ContainerStarted","Data":"5a9a7525f96138d9a7d22ad45484ed4f1ea653552cc1960024343bf7e0437558"} Nov 26 17:18:40 crc kubenswrapper[5010]: I1126 17:18:40.261831 5010 generic.go:334] "Generic (PLEG): container finished" podID="62d3894e-761d-4a8d-855f-ced842b8930a" containerID="cc918c61f984f958ced79c62bd1e67e2bff0fba2aac20a2bb44cd6792048160a" exitCode=0 Nov 26 17:18:40 crc kubenswrapper[5010]: I1126 17:18:40.261878 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" event={"ID":"62d3894e-761d-4a8d-855f-ced842b8930a","Type":"ContainerDied","Data":"cc918c61f984f958ced79c62bd1e67e2bff0fba2aac20a2bb44cd6792048160a"} Nov 26 17:18:40 crc kubenswrapper[5010]: I1126 17:18:40.261910 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" event={"ID":"62d3894e-761d-4a8d-855f-ced842b8930a","Type":"ContainerStarted","Data":"c0c442fc75bd1dbe35b1a59baf73efb2a7ef0f04206915a9152b9569e14786b8"} Nov 26 17:18:40 crc kubenswrapper[5010]: I1126 17:18:40.262879 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:40 crc kubenswrapper[5010]: I1126 17:18:40.302291 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" podStartSLOduration=2.302260538 podStartE2EDuration="2.302260538s" podCreationTimestamp="2025-11-26 17:18:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:18:40.292374682 +0000 UTC m=+6741.083091860" watchObservedRunningTime="2025-11-26 17:18:40.302260538 +0000 UTC m=+6741.092977716" Nov 26 17:18:46 crc kubenswrapper[5010]: I1126 17:18:46.061753 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-8gjck"] Nov 26 17:18:46 crc kubenswrapper[5010]: I1126 17:18:46.073522 
5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-8gjck"] Nov 26 17:18:47 crc kubenswrapper[5010]: I1126 17:18:47.914512 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ccb3027-4d8f-452f-b96d-76a970475d7a" path="/var/lib/kubelet/pods/7ccb3027-4d8f-452f-b96d-76a970475d7a/volumes" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.420931 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.502014 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54d7c4984c-c8tbz"] Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.502361 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerName="dnsmasq-dns" containerID="cri-o://deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6" gracePeriod=10 Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.676904 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7947bf78cc-xwb6q"] Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.680088 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.700071 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7947bf78cc-xwb6q"] Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.840252 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h22bp\" (UniqueName: \"kubernetes.io/projected/422202d4-c238-4769-9039-1cbbe92950c5-kube-api-access-h22bp\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.840342 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-ovsdbserver-sb\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.840385 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-dns-svc\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.840505 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-ovsdbserver-nb\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.840598 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-openstack-cell1\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: 
\"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.840720 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-config\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.944059 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h22bp\" (UniqueName: \"kubernetes.io/projected/422202d4-c238-4769-9039-1cbbe92950c5-kube-api-access-h22bp\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.944120 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-ovsdbserver-sb\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.944153 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-dns-svc\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.944194 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-ovsdbserver-nb\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.944245 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-openstack-cell1\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.944286 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-config\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.945509 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-config\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.945691 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-dns-svc\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc 
kubenswrapper[5010]: I1126 17:18:48.946069 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-ovsdbserver-nb\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.946482 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-ovsdbserver-sb\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.946979 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/422202d4-c238-4769-9039-1cbbe92950c5-openstack-cell1\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:48 crc kubenswrapper[5010]: I1126 17:18:48.983459 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h22bp\" (UniqueName: \"kubernetes.io/projected/422202d4-c238-4769-9039-1cbbe92950c5-kube-api-access-h22bp\") pod \"dnsmasq-dns-7947bf78cc-xwb6q\" (UID: \"422202d4-c238-4769-9039-1cbbe92950c5\") " pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.012241 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.160263 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.250773 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-config\") pod \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.250835 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-dns-svc\") pod \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.250869 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-nb\") pod \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.250905 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-sb\") pod \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.250934 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqw5x\" (UniqueName: \"kubernetes.io/projected/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-kube-api-access-fqw5x\") pod \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\" (UID: \"e4a27ccd-c8c3-499a-8ce2-463dd5e33842\") " Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.259343 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-kube-api-access-fqw5x" (OuterVolumeSpecName: "kube-api-access-fqw5x") pod "e4a27ccd-c8c3-499a-8ce2-463dd5e33842" (UID: "e4a27ccd-c8c3-499a-8ce2-463dd5e33842"). InnerVolumeSpecName "kube-api-access-fqw5x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.305498 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e4a27ccd-c8c3-499a-8ce2-463dd5e33842" (UID: "e4a27ccd-c8c3-499a-8ce2-463dd5e33842"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.323354 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-config" (OuterVolumeSpecName: "config") pod "e4a27ccd-c8c3-499a-8ce2-463dd5e33842" (UID: "e4a27ccd-c8c3-499a-8ce2-463dd5e33842"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.329481 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e4a27ccd-c8c3-499a-8ce2-463dd5e33842" (UID: "e4a27ccd-c8c3-499a-8ce2-463dd5e33842"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.330388 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e4a27ccd-c8c3-499a-8ce2-463dd5e33842" (UID: "e4a27ccd-c8c3-499a-8ce2-463dd5e33842"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.354264 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.354324 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.354340 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.354352 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.354367 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqw5x\" (UniqueName: \"kubernetes.io/projected/e4a27ccd-c8c3-499a-8ce2-463dd5e33842-kube-api-access-fqw5x\") on node \"crc\" DevicePath \"\"" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.372061 5010 generic.go:334] "Generic (PLEG): container finished" podID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerID="deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6" exitCode=0 Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.372100 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.372129 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" event={"ID":"e4a27ccd-c8c3-499a-8ce2-463dd5e33842","Type":"ContainerDied","Data":"deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6"} Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.372173 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-54d7c4984c-c8tbz" event={"ID":"e4a27ccd-c8c3-499a-8ce2-463dd5e33842","Type":"ContainerDied","Data":"90d590bd744ab7abab7d47657e1e9cc959ed49f7392eadd38aa54cc84c74fe01"} Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.372213 5010 scope.go:117] "RemoveContainer" containerID="deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.421357 5010 scope.go:117] "RemoveContainer" containerID="2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.425700 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-54d7c4984c-c8tbz"] Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.440062 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-54d7c4984c-c8tbz"] Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.458980 5010 scope.go:117] "RemoveContainer" containerID="deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6" Nov 26 17:18:49 crc kubenswrapper[5010]: E1126 17:18:49.460147 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6\": container with ID starting with deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6 not found: ID does not exist" containerID="deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.460206 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6"} err="failed to get container status \"deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6\": rpc error: code = NotFound desc = could not find container \"deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6\": container with ID starting with deea3a59bbd013c1625508ca86e32976ac0e24f711d5f6d7f7cff0e7378934b6 not found: ID does not exist" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.460234 5010 scope.go:117] "RemoveContainer" containerID="2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861" Nov 26 17:18:49 crc kubenswrapper[5010]: E1126 17:18:49.461066 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861\": container with ID starting with 2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861 not found: ID does not exist" containerID="2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.461124 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861"} err="failed to get container status 
\"2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861\": rpc error: code = NotFound desc = could not find container \"2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861\": container with ID starting with 2d2fd8f64b8312529f57722e0c1056405c8e514efc401914d8cad26b0c3f4861 not found: ID does not exist" Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.487834 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7947bf78cc-xwb6q"] Nov 26 17:18:49 crc kubenswrapper[5010]: I1126 17:18:49.931964 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" path="/var/lib/kubelet/pods/e4a27ccd-c8c3-499a-8ce2-463dd5e33842/volumes" Nov 26 17:18:50 crc kubenswrapper[5010]: I1126 17:18:50.389844 5010 generic.go:334] "Generic (PLEG): container finished" podID="422202d4-c238-4769-9039-1cbbe92950c5" containerID="54b98aa727e24608d110cb4e3d7b86c8b70457b72cbffb7322ea49e7d6910691" exitCode=0 Nov 26 17:18:50 crc kubenswrapper[5010]: I1126 17:18:50.389909 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" event={"ID":"422202d4-c238-4769-9039-1cbbe92950c5","Type":"ContainerDied","Data":"54b98aa727e24608d110cb4e3d7b86c8b70457b72cbffb7322ea49e7d6910691"} Nov 26 17:18:50 crc kubenswrapper[5010]: I1126 17:18:50.391069 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" event={"ID":"422202d4-c238-4769-9039-1cbbe92950c5","Type":"ContainerStarted","Data":"823efa008e91867aae3f41ee87d428d1a764fcf4416c5adffff4e0a860a59630"} Nov 26 17:18:51 crc kubenswrapper[5010]: I1126 17:18:51.411386 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" event={"ID":"422202d4-c238-4769-9039-1cbbe92950c5","Type":"ContainerStarted","Data":"7bdb8bb83e4fcb060af61576e98a61995bf1ac45b4d492155986ac838d150e9c"} Nov 26 17:18:51 crc kubenswrapper[5010]: I1126 17:18:51.412954 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:51 crc kubenswrapper[5010]: I1126 17:18:51.444197 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" podStartSLOduration=3.444178455 podStartE2EDuration="3.444178455s" podCreationTimestamp="2025-11-26 17:18:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:18:51.434694719 +0000 UTC m=+6752.225411917" watchObservedRunningTime="2025-11-26 17:18:51.444178455 +0000 UTC m=+6752.234895613" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.488013 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l"] Nov 26 17:18:54 crc kubenswrapper[5010]: E1126 17:18:54.489377 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerName="init" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.489401 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerName="init" Nov 26 17:18:54 crc kubenswrapper[5010]: E1126 17:18:54.489440 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerName="dnsmasq-dns" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.489456 5010 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerName="dnsmasq-dns" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.489941 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4a27ccd-c8c3-499a-8ce2-463dd5e33842" containerName="dnsmasq-dns" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.491209 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.501875 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.502202 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.502694 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.502760 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.511557 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l"] Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.569470 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.569663 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.569745 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.569853 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm2zv\" (UniqueName: \"kubernetes.io/projected/a38ccb1f-698d-4464-986d-6b2d5ac67beb-kube-api-access-wm2zv\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.671517 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.671869 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.671945 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm2zv\" (UniqueName: \"kubernetes.io/projected/a38ccb1f-698d-4464-986d-6b2d5ac67beb-kube-api-access-wm2zv\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.672086 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.679625 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-ssh-key\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.679637 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-inventory\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.681411 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-pre-adoption-validation-combined-ca-bundle\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.702274 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm2zv\" (UniqueName: \"kubernetes.io/projected/a38ccb1f-698d-4464-986d-6b2d5ac67beb-kube-api-access-wm2zv\") pod \"pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 
17:18:54 crc kubenswrapper[5010]: I1126 17:18:54.832367 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:18:55 crc kubenswrapper[5010]: I1126 17:18:55.574822 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l"] Nov 26 17:18:55 crc kubenswrapper[5010]: W1126 17:18:55.586089 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda38ccb1f_698d_4464_986d_6b2d5ac67beb.slice/crio-9baaf54a6daece264e7ab96744c2afa73c196a9085fe619928c34495fdcb47ce WatchSource:0}: Error finding container 9baaf54a6daece264e7ab96744c2afa73c196a9085fe619928c34495fdcb47ce: Status 404 returned error can't find the container with id 9baaf54a6daece264e7ab96744c2afa73c196a9085fe619928c34495fdcb47ce Nov 26 17:18:56 crc kubenswrapper[5010]: I1126 17:18:56.476721 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" event={"ID":"a38ccb1f-698d-4464-986d-6b2d5ac67beb","Type":"ContainerStarted","Data":"9baaf54a6daece264e7ab96744c2afa73c196a9085fe619928c34495fdcb47ce"} Nov 26 17:18:59 crc kubenswrapper[5010]: I1126 17:18:59.014522 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7947bf78cc-xwb6q" Nov 26 17:18:59 crc kubenswrapper[5010]: I1126 17:18:59.081064 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dfd49754f-7bmtn"] Nov 26 17:18:59 crc kubenswrapper[5010]: I1126 17:18:59.081341 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="dnsmasq-dns" containerID="cri-o://c0c442fc75bd1dbe35b1a59baf73efb2a7ef0f04206915a9152b9569e14786b8" gracePeriod=10 Nov 26 17:18:59 crc kubenswrapper[5010]: I1126 17:18:59.510974 5010 generic.go:334] "Generic (PLEG): container finished" podID="62d3894e-761d-4a8d-855f-ced842b8930a" containerID="c0c442fc75bd1dbe35b1a59baf73efb2a7ef0f04206915a9152b9569e14786b8" exitCode=0 Nov 26 17:18:59 crc kubenswrapper[5010]: I1126 17:18:59.511020 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" event={"ID":"62d3894e-761d-4a8d-855f-ced842b8930a","Type":"ContainerDied","Data":"c0c442fc75bd1dbe35b1a59baf73efb2a7ef0f04206915a9152b9569e14786b8"} Nov 26 17:19:03 crc kubenswrapper[5010]: I1126 17:19:03.420044 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.1.174:5353: connect: connection refused" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.282113 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.458906 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-sb\") pod \"62d3894e-761d-4a8d-855f-ced842b8930a\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.458982 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-config\") pod \"62d3894e-761d-4a8d-855f-ced842b8930a\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.459058 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-openstack-cell1\") pod \"62d3894e-761d-4a8d-855f-ced842b8930a\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.459121 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lns8h\" (UniqueName: \"kubernetes.io/projected/62d3894e-761d-4a8d-855f-ced842b8930a-kube-api-access-lns8h\") pod \"62d3894e-761d-4a8d-855f-ced842b8930a\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.459173 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-dns-svc\") pod \"62d3894e-761d-4a8d-855f-ced842b8930a\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.459226 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-nb\") pod \"62d3894e-761d-4a8d-855f-ced842b8930a\" (UID: \"62d3894e-761d-4a8d-855f-ced842b8930a\") " Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.465618 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62d3894e-761d-4a8d-855f-ced842b8930a-kube-api-access-lns8h" (OuterVolumeSpecName: "kube-api-access-lns8h") pod "62d3894e-761d-4a8d-855f-ced842b8930a" (UID: "62d3894e-761d-4a8d-855f-ced842b8930a"). InnerVolumeSpecName "kube-api-access-lns8h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.525806 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-config" (OuterVolumeSpecName: "config") pod "62d3894e-761d-4a8d-855f-ced842b8930a" (UID: "62d3894e-761d-4a8d-855f-ced842b8930a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.526192 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "62d3894e-761d-4a8d-855f-ced842b8930a" (UID: "62d3894e-761d-4a8d-855f-ced842b8930a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.533048 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "62d3894e-761d-4a8d-855f-ced842b8930a" (UID: "62d3894e-761d-4a8d-855f-ced842b8930a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.534481 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-openstack-cell1" (OuterVolumeSpecName: "openstack-cell1") pod "62d3894e-761d-4a8d-855f-ced842b8930a" (UID: "62d3894e-761d-4a8d-855f-ced842b8930a"). InnerVolumeSpecName "openstack-cell1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.546105 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "62d3894e-761d-4a8d-855f-ced842b8930a" (UID: "62d3894e-761d-4a8d-855f-ced842b8930a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.561994 5010 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.562457 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.562474 5010 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.562488 5010 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-config\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.562499 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1\" (UniqueName: \"kubernetes.io/configmap/62d3894e-761d-4a8d-855f-ced842b8930a-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.562514 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lns8h\" (UniqueName: \"kubernetes.io/projected/62d3894e-761d-4a8d-855f-ced842b8930a-kube-api-access-lns8h\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.574461 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" event={"ID":"a38ccb1f-698d-4464-986d-6b2d5ac67beb","Type":"ContainerStarted","Data":"9ebc6735d112f762a229d70017e8c77f79c829472c1b6d1c6e1ac9142b6f1f3f"} Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.578470 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" 
event={"ID":"62d3894e-761d-4a8d-855f-ced842b8930a","Type":"ContainerDied","Data":"5a9a7525f96138d9a7d22ad45484ed4f1ea653552cc1960024343bf7e0437558"} Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.578749 5010 scope.go:117] "RemoveContainer" containerID="c0c442fc75bd1dbe35b1a59baf73efb2a7ef0f04206915a9152b9569e14786b8" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.578514 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6dfd49754f-7bmtn" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.602386 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" podStartSLOduration=2.220703381 podStartE2EDuration="11.602351409s" podCreationTimestamp="2025-11-26 17:18:54 +0000 UTC" firstStartedPulling="2025-11-26 17:18:55.590570953 +0000 UTC m=+6756.381288111" lastFinishedPulling="2025-11-26 17:19:04.972218951 +0000 UTC m=+6765.762936139" observedRunningTime="2025-11-26 17:19:05.5943625 +0000 UTC m=+6766.385079708" watchObservedRunningTime="2025-11-26 17:19:05.602351409 +0000 UTC m=+6766.393068587" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.619338 5010 scope.go:117] "RemoveContainer" containerID="cc918c61f984f958ced79c62bd1e67e2bff0fba2aac20a2bb44cd6792048160a" Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.633615 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6dfd49754f-7bmtn"] Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.643535 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6dfd49754f-7bmtn"] Nov 26 17:19:05 crc kubenswrapper[5010]: I1126 17:19:05.904601 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" path="/var/lib/kubelet/pods/62d3894e-761d-4a8d-855f-ced842b8930a/volumes" Nov 26 17:19:10 crc kubenswrapper[5010]: I1126 17:19:10.306852 5010 scope.go:117] "RemoveContainer" containerID="f7512cc25f723adf1798cb87edc561375efbf7cd2e745a9d86031836d4706208" Nov 26 17:19:10 crc kubenswrapper[5010]: I1126 17:19:10.382419 5010 scope.go:117] "RemoveContainer" containerID="90b73614f3185e6d610275cdef4e34f2cc5840700c20a450255d47989c55315f" Nov 26 17:19:10 crc kubenswrapper[5010]: I1126 17:19:10.439108 5010 scope.go:117] "RemoveContainer" containerID="b47c459ed43cacd884e84c4d1627087ba95e615cbe67afa829e6809a50ddb406" Nov 26 17:19:10 crc kubenswrapper[5010]: I1126 17:19:10.482325 5010 scope.go:117] "RemoveContainer" containerID="249f63b4814641be5bd617f37a60928ea705cc670db8e0ace0565b513c18c138" Nov 26 17:19:19 crc kubenswrapper[5010]: I1126 17:19:19.761762 5010 generic.go:334] "Generic (PLEG): container finished" podID="a38ccb1f-698d-4464-986d-6b2d5ac67beb" containerID="9ebc6735d112f762a229d70017e8c77f79c829472c1b6d1c6e1ac9142b6f1f3f" exitCode=0 Nov 26 17:19:19 crc kubenswrapper[5010]: I1126 17:19:19.761793 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" event={"ID":"a38ccb1f-698d-4464-986d-6b2d5ac67beb","Type":"ContainerDied","Data":"9ebc6735d112f762a229d70017e8c77f79c829472c1b6d1c6e1ac9142b6f1f3f"} Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.277985 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.349930 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm2zv\" (UniqueName: \"kubernetes.io/projected/a38ccb1f-698d-4464-986d-6b2d5ac67beb-kube-api-access-wm2zv\") pod \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.350084 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-ssh-key\") pod \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.350194 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-inventory\") pod \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.350276 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-pre-adoption-validation-combined-ca-bundle\") pod \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\" (UID: \"a38ccb1f-698d-4464-986d-6b2d5ac67beb\") " Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.357747 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-pre-adoption-validation-combined-ca-bundle" (OuterVolumeSpecName: "pre-adoption-validation-combined-ca-bundle") pod "a38ccb1f-698d-4464-986d-6b2d5ac67beb" (UID: "a38ccb1f-698d-4464-986d-6b2d5ac67beb"). InnerVolumeSpecName "pre-adoption-validation-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.358331 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a38ccb1f-698d-4464-986d-6b2d5ac67beb-kube-api-access-wm2zv" (OuterVolumeSpecName: "kube-api-access-wm2zv") pod "a38ccb1f-698d-4464-986d-6b2d5ac67beb" (UID: "a38ccb1f-698d-4464-986d-6b2d5ac67beb"). InnerVolumeSpecName "kube-api-access-wm2zv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.397697 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-inventory" (OuterVolumeSpecName: "inventory") pod "a38ccb1f-698d-4464-986d-6b2d5ac67beb" (UID: "a38ccb1f-698d-4464-986d-6b2d5ac67beb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.398576 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a38ccb1f-698d-4464-986d-6b2d5ac67beb" (UID: "a38ccb1f-698d-4464-986d-6b2d5ac67beb"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.452585 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.452616 5010 reconciler_common.go:293] "Volume detached for volume \"pre-adoption-validation-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-pre-adoption-validation-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.452639 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm2zv\" (UniqueName: \"kubernetes.io/projected/a38ccb1f-698d-4464-986d-6b2d5ac67beb-kube-api-access-wm2zv\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.452650 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a38ccb1f-698d-4464-986d-6b2d5ac67beb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.787367 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" event={"ID":"a38ccb1f-698d-4464-986d-6b2d5ac67beb","Type":"ContainerDied","Data":"9baaf54a6daece264e7ab96744c2afa73c196a9085fe619928c34495fdcb47ce"} Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.787423 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9baaf54a6daece264e7ab96744c2afa73c196a9085fe619928c34495fdcb47ce" Nov 26 17:19:21 crc kubenswrapper[5010]: I1126 17:19:21.787463 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.934124 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm"] Nov 26 17:19:31 crc kubenswrapper[5010]: E1126 17:19:31.935773 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a38ccb1f-698d-4464-986d-6b2d5ac67beb" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.935794 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a38ccb1f-698d-4464-986d-6b2d5ac67beb" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 26 17:19:31 crc kubenswrapper[5010]: E1126 17:19:31.935822 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="init" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.935830 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="init" Nov 26 17:19:31 crc kubenswrapper[5010]: E1126 17:19:31.935857 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="dnsmasq-dns" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.935865 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="dnsmasq-dns" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.936160 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a38ccb1f-698d-4464-986d-6b2d5ac67beb" containerName="pre-adoption-validation-openstack-pre-adoption-openstack-cell1" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.936182 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="62d3894e-761d-4a8d-855f-ced842b8930a" containerName="dnsmasq-dns" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.937162 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.942701 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.943150 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.943454 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.943623 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:19:31 crc kubenswrapper[5010]: I1126 17:19:31.982803 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm"] Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.102324 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.102415 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.102541 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.102593 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw6fz\" (UniqueName: \"kubernetes.io/projected/484ffc98-a27e-4fc3-9fb9-70c960bd0699-kube-api-access-gw6fz\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.204643 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.204766 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-ssh-key\") pod 
\"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.204837 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.204862 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw6fz\" (UniqueName: \"kubernetes.io/projected/484ffc98-a27e-4fc3-9fb9-70c960bd0699-kube-api-access-gw6fz\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.213475 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-inventory\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.213819 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-ssh-key\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.216161 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-tripleo-cleanup-combined-ca-bundle\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.240451 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw6fz\" (UniqueName: \"kubernetes.io/projected/484ffc98-a27e-4fc3-9fb9-70c960bd0699-kube-api-access-gw6fz\") pod \"tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:32 crc kubenswrapper[5010]: I1126 17:19:32.265578 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:19:33 crc kubenswrapper[5010]: I1126 17:19:33.393473 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm"] Nov 26 17:19:33 crc kubenswrapper[5010]: W1126 17:19:33.395221 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod484ffc98_a27e_4fc3_9fb9_70c960bd0699.slice/crio-b0ce9f13b4c33e358b14cdb0f3b54eea90b775ed2ff581adbe1e4d84ecd0b62a WatchSource:0}: Error finding container b0ce9f13b4c33e358b14cdb0f3b54eea90b775ed2ff581adbe1e4d84ecd0b62a: Status 404 returned error can't find the container with id b0ce9f13b4c33e358b14cdb0f3b54eea90b775ed2ff581adbe1e4d84ecd0b62a Nov 26 17:19:33 crc kubenswrapper[5010]: I1126 17:19:33.980789 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" event={"ID":"484ffc98-a27e-4fc3-9fb9-70c960bd0699","Type":"ContainerStarted","Data":"b0ce9f13b4c33e358b14cdb0f3b54eea90b775ed2ff581adbe1e4d84ecd0b62a"} Nov 26 17:19:35 crc kubenswrapper[5010]: I1126 17:19:34.999574 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" event={"ID":"484ffc98-a27e-4fc3-9fb9-70c960bd0699","Type":"ContainerStarted","Data":"5b9d6224840167d0c4ce1c55781d0894844fea51226289dcf1a3950705dfbacf"} Nov 26 17:19:35 crc kubenswrapper[5010]: I1126 17:19:35.024406 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" podStartSLOduration=3.217794583 podStartE2EDuration="4.024361819s" podCreationTimestamp="2025-11-26 17:19:31 +0000 UTC" firstStartedPulling="2025-11-26 17:19:33.398364849 +0000 UTC m=+6794.189082037" lastFinishedPulling="2025-11-26 17:19:34.204932085 +0000 UTC m=+6794.995649273" observedRunningTime="2025-11-26 17:19:35.022306258 +0000 UTC m=+6795.813023476" watchObservedRunningTime="2025-11-26 17:19:35.024361819 +0000 UTC m=+6795.815078997" Nov 26 17:19:41 crc kubenswrapper[5010]: I1126 17:19:41.423068 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:19:41 crc kubenswrapper[5010]: I1126 17:19:41.423897 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:20:03 crc kubenswrapper[5010]: I1126 17:20:03.056406 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-create-t7vkz"] Nov 26 17:20:03 crc kubenswrapper[5010]: I1126 17:20:03.068314 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-create-t7vkz"] Nov 26 17:20:03 crc kubenswrapper[5010]: I1126 17:20:03.913116 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c26ac4d-4244-4540-8a5e-8edc62cd6db7" path="/var/lib/kubelet/pods/9c26ac4d-4244-4540-8a5e-8edc62cd6db7/volumes" Nov 26 17:20:04 crc kubenswrapper[5010]: I1126 17:20:04.038307 5010 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-60a6-account-create-update-bxfcp"] Nov 26 17:20:04 crc kubenswrapper[5010]: I1126 17:20:04.056814 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-60a6-account-create-update-bxfcp"] Nov 26 17:20:05 crc kubenswrapper[5010]: I1126 17:20:05.912073 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0be84c79-d2c2-4633-8f59-bcf7084e8101" path="/var/lib/kubelet/pods/0be84c79-d2c2-4633-8f59-bcf7084e8101/volumes" Nov 26 17:20:09 crc kubenswrapper[5010]: I1126 17:20:09.035757 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-persistence-db-create-6dnnh"] Nov 26 17:20:09 crc kubenswrapper[5010]: I1126 17:20:09.045177 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-persistence-db-create-6dnnh"] Nov 26 17:20:09 crc kubenswrapper[5010]: I1126 17:20:09.908281 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8687aeb-d22b-4f26-bbb8-24728c45ae09" path="/var/lib/kubelet/pods/e8687aeb-d22b-4f26-bbb8-24728c45ae09/volumes" Nov 26 17:20:10 crc kubenswrapper[5010]: I1126 17:20:10.713830 5010 scope.go:117] "RemoveContainer" containerID="d04f99ef320f57f5bcff4a47246d3e498ce9a44408c4011c353a333a7c3502d3" Nov 26 17:20:10 crc kubenswrapper[5010]: I1126 17:20:10.760679 5010 scope.go:117] "RemoveContainer" containerID="f52930f175673c8f873764aa5f89b2931a37647312831370b490d61e98b56c63" Nov 26 17:20:10 crc kubenswrapper[5010]: I1126 17:20:10.829229 5010 scope.go:117] "RemoveContainer" containerID="ffea1240c5a23741022f28dcbd1610ee339cd5814551280648386730b18f812d" Nov 26 17:20:11 crc kubenswrapper[5010]: I1126 17:20:11.031157 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-1499-account-create-update-zwbjm"] Nov 26 17:20:11 crc kubenswrapper[5010]: I1126 17:20:11.042405 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-1499-account-create-update-zwbjm"] Nov 26 17:20:11 crc kubenswrapper[5010]: I1126 17:20:11.422893 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:20:11 crc kubenswrapper[5010]: I1126 17:20:11.422985 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:20:11 crc kubenswrapper[5010]: I1126 17:20:11.907757 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="371275a0-39e1-4c5c-a68a-44c3e50d5998" path="/var/lib/kubelet/pods/371275a0-39e1-4c5c-a68a-44c3e50d5998/volumes" Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.422570 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.423130 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.423186 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.424244 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef98edb74c5bb6bcfce742aabe11d10cee533dd74639da58dff43195ce6da7ae"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.424305 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://ef98edb74c5bb6bcfce742aabe11d10cee533dd74639da58dff43195ce6da7ae" gracePeriod=600 Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.942845 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="ef98edb74c5bb6bcfce742aabe11d10cee533dd74639da58dff43195ce6da7ae" exitCode=0 Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.943205 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"ef98edb74c5bb6bcfce742aabe11d10cee533dd74639da58dff43195ce6da7ae"} Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.943250 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90"} Nov 26 17:20:41 crc kubenswrapper[5010]: I1126 17:20:41.943279 5010 scope.go:117] "RemoveContainer" containerID="7f50bf4cd1deb810331ebb5e0bed905e4c0b99374d08c35069f1c8cf855a790f" Nov 26 17:21:07 crc kubenswrapper[5010]: I1126 17:21:07.050196 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/octavia-db-sync-p77g4"] Nov 26 17:21:07 crc kubenswrapper[5010]: I1126 17:21:07.059100 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/octavia-db-sync-p77g4"] Nov 26 17:21:07 crc kubenswrapper[5010]: I1126 17:21:07.918578 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14bbaf27-d876-4901-918d-6bc09332f656" path="/var/lib/kubelet/pods/14bbaf27-d876-4901-918d-6bc09332f656/volumes" Nov 26 17:21:10 crc kubenswrapper[5010]: I1126 17:21:10.983435 5010 scope.go:117] "RemoveContainer" containerID="00095defb9449b856e1df11c82884911a6f874f250f7846e5f3d0ff508a95a3b" Nov 26 17:21:11 crc kubenswrapper[5010]: I1126 17:21:11.018879 5010 scope.go:117] "RemoveContainer" containerID="7ed92aa6d195e4871e00b6a05c930ab6c7cb6f083f80d35f1e419dc19ffd2a2b" Nov 26 17:21:11 crc kubenswrapper[5010]: I1126 17:21:11.073171 5010 scope.go:117] "RemoveContainer" containerID="ae58dd5de0cda88047c2ee7de60b8b82dcff86584a102cde62fe688f4226c904" Nov 26 17:21:11 crc kubenswrapper[5010]: I1126 17:21:11.137556 5010 scope.go:117] "RemoveContainer" 
containerID="ba743c0129581c0ebb93b02da298bfd8bee2ffb16d2112c2ef3ce2ffe6c4133f" Nov 26 17:21:11 crc kubenswrapper[5010]: I1126 17:21:11.191633 5010 scope.go:117] "RemoveContainer" containerID="24eb302e4e2aa089f09b565339ca8e21b763633539189a951f7dfbe6eb80f11e" Nov 26 17:21:11 crc kubenswrapper[5010]: I1126 17:21:11.214277 5010 scope.go:117] "RemoveContainer" containerID="731bbb62ddda444e6336e02fc47c89f7357aead79f6fea293a456e4fd296850f" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.077240 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fznnn"] Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.080635 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.115868 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fznnn"] Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.179054 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-catalog-content\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.179139 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-utilities\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.179333 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njnlm\" (UniqueName: \"kubernetes.io/projected/5c917746-4faf-43f8-8d64-08b5520affad-kube-api-access-njnlm\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.281561 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njnlm\" (UniqueName: \"kubernetes.io/projected/5c917746-4faf-43f8-8d64-08b5520affad-kube-api-access-njnlm\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.282212 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-catalog-content\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.282789 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-catalog-content\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.282992 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-utilities\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.284647 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-utilities\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.312924 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njnlm\" (UniqueName: \"kubernetes.io/projected/5c917746-4faf-43f8-8d64-08b5520affad-kube-api-access-njnlm\") pod \"redhat-marketplace-fznnn\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.430877 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:45 crc kubenswrapper[5010]: I1126 17:21:45.943017 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fznnn"] Nov 26 17:21:46 crc kubenswrapper[5010]: I1126 17:21:46.939586 5010 generic.go:334] "Generic (PLEG): container finished" podID="5c917746-4faf-43f8-8d64-08b5520affad" containerID="fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18" exitCode=0 Nov 26 17:21:46 crc kubenswrapper[5010]: I1126 17:21:46.939650 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fznnn" event={"ID":"5c917746-4faf-43f8-8d64-08b5520affad","Type":"ContainerDied","Data":"fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18"} Nov 26 17:21:46 crc kubenswrapper[5010]: I1126 17:21:46.940139 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fznnn" event={"ID":"5c917746-4faf-43f8-8d64-08b5520affad","Type":"ContainerStarted","Data":"ebc5deed1fdb01f0abc8a296bb8b8250a08b19e03a87a8178f0f5bf58dd410f2"} Nov 26 17:21:46 crc kubenswrapper[5010]: I1126 17:21:46.941950 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:21:48 crc kubenswrapper[5010]: I1126 17:21:48.971207 5010 generic.go:334] "Generic (PLEG): container finished" podID="5c917746-4faf-43f8-8d64-08b5520affad" containerID="9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a" exitCode=0 Nov 26 17:21:48 crc kubenswrapper[5010]: I1126 17:21:48.971422 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fznnn" event={"ID":"5c917746-4faf-43f8-8d64-08b5520affad","Type":"ContainerDied","Data":"9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a"} Nov 26 17:21:49 crc kubenswrapper[5010]: I1126 17:21:49.987008 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fznnn" event={"ID":"5c917746-4faf-43f8-8d64-08b5520affad","Type":"ContainerStarted","Data":"7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77"} Nov 26 17:21:50 crc kubenswrapper[5010]: I1126 17:21:50.007010 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fznnn" podStartSLOduration=2.490880142 
podStartE2EDuration="5.006993533s" podCreationTimestamp="2025-11-26 17:21:45 +0000 UTC" firstStartedPulling="2025-11-26 17:21:46.941727058 +0000 UTC m=+6927.732444206" lastFinishedPulling="2025-11-26 17:21:49.457840449 +0000 UTC m=+6930.248557597" observedRunningTime="2025-11-26 17:21:50.0020265 +0000 UTC m=+6930.792743658" watchObservedRunningTime="2025-11-26 17:21:50.006993533 +0000 UTC m=+6930.797710681" Nov 26 17:21:55 crc kubenswrapper[5010]: I1126 17:21:55.431910 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:55 crc kubenswrapper[5010]: I1126 17:21:55.432531 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:55 crc kubenswrapper[5010]: I1126 17:21:55.509865 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:56 crc kubenswrapper[5010]: I1126 17:21:56.112120 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:56 crc kubenswrapper[5010]: I1126 17:21:56.169491 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fznnn"] Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.072081 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fznnn" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="registry-server" containerID="cri-o://7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77" gracePeriod=2 Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.685000 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.802333 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-utilities\") pod \"5c917746-4faf-43f8-8d64-08b5520affad\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.802701 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-catalog-content\") pod \"5c917746-4faf-43f8-8d64-08b5520affad\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.802824 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-njnlm\" (UniqueName: \"kubernetes.io/projected/5c917746-4faf-43f8-8d64-08b5520affad-kube-api-access-njnlm\") pod \"5c917746-4faf-43f8-8d64-08b5520affad\" (UID: \"5c917746-4faf-43f8-8d64-08b5520affad\") " Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.804586 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-utilities" (OuterVolumeSpecName: "utilities") pod "5c917746-4faf-43f8-8d64-08b5520affad" (UID: "5c917746-4faf-43f8-8d64-08b5520affad"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.808847 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c917746-4faf-43f8-8d64-08b5520affad-kube-api-access-njnlm" (OuterVolumeSpecName: "kube-api-access-njnlm") pod "5c917746-4faf-43f8-8d64-08b5520affad" (UID: "5c917746-4faf-43f8-8d64-08b5520affad"). InnerVolumeSpecName "kube-api-access-njnlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.835604 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5c917746-4faf-43f8-8d64-08b5520affad" (UID: "5c917746-4faf-43f8-8d64-08b5520affad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.906030 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-njnlm\" (UniqueName: \"kubernetes.io/projected/5c917746-4faf-43f8-8d64-08b5520affad-kube-api-access-njnlm\") on node \"crc\" DevicePath \"\"" Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.906076 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:21:58 crc kubenswrapper[5010]: I1126 17:21:58.906090 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5c917746-4faf-43f8-8d64-08b5520affad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.084887 5010 generic.go:334] "Generic (PLEG): container finished" podID="5c917746-4faf-43f8-8d64-08b5520affad" containerID="7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77" exitCode=0 Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.084935 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fznnn" event={"ID":"5c917746-4faf-43f8-8d64-08b5520affad","Type":"ContainerDied","Data":"7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77"} Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.084972 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fznnn" event={"ID":"5c917746-4faf-43f8-8d64-08b5520affad","Type":"ContainerDied","Data":"ebc5deed1fdb01f0abc8a296bb8b8250a08b19e03a87a8178f0f5bf58dd410f2"} Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.084992 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fznnn" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.085000 5010 scope.go:117] "RemoveContainer" containerID="7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.112426 5010 scope.go:117] "RemoveContainer" containerID="9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.124547 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fznnn"] Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.133972 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fznnn"] Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.140818 5010 scope.go:117] "RemoveContainer" containerID="fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.196429 5010 scope.go:117] "RemoveContainer" containerID="7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77" Nov 26 17:21:59 crc kubenswrapper[5010]: E1126 17:21:59.197261 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77\": container with ID starting with 7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77 not found: ID does not exist" containerID="7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.197298 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77"} err="failed to get container status \"7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77\": rpc error: code = NotFound desc = could not find container \"7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77\": container with ID starting with 7ba0a7c522138a90d3c78ab44833e25d1759cdb8784cf69bc2b51fccb2abba77 not found: ID does not exist" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.197323 5010 scope.go:117] "RemoveContainer" containerID="9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a" Nov 26 17:21:59 crc kubenswrapper[5010]: E1126 17:21:59.197690 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a\": container with ID starting with 9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a not found: ID does not exist" containerID="9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.197757 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a"} err="failed to get container status \"9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a\": rpc error: code = NotFound desc = could not find container \"9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a\": container with ID starting with 9080b3f16e100813a57d90cc3a79e0a9500f9b171000f679773977546ab0c63a not found: ID does not exist" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.197783 5010 scope.go:117] "RemoveContainer" 
containerID="fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18" Nov 26 17:21:59 crc kubenswrapper[5010]: E1126 17:21:59.199243 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18\": container with ID starting with fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18 not found: ID does not exist" containerID="fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.199273 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18"} err="failed to get container status \"fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18\": rpc error: code = NotFound desc = could not find container \"fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18\": container with ID starting with fca5a13dbe971f9deb53422cd05d184a766ca3821e4371a3306f8ad7fa26df18 not found: ID does not exist" Nov 26 17:21:59 crc kubenswrapper[5010]: I1126 17:21:59.908893 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c917746-4faf-43f8-8d64-08b5520affad" path="/var/lib/kubelet/pods/5c917746-4faf-43f8-8d64-08b5520affad/volumes" Nov 26 17:22:41 crc kubenswrapper[5010]: I1126 17:22:41.423199 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:22:41 crc kubenswrapper[5010]: I1126 17:22:41.423698 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:23:11 crc kubenswrapper[5010]: I1126 17:23:11.422613 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:23:11 crc kubenswrapper[5010]: I1126 17:23:11.423339 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:23:41 crc kubenswrapper[5010]: I1126 17:23:41.423240 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:23:41 crc kubenswrapper[5010]: I1126 17:23:41.424144 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:23:41 crc kubenswrapper[5010]: I1126 17:23:41.424226 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:23:41 crc kubenswrapper[5010]: I1126 17:23:41.425793 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:23:41 crc kubenswrapper[5010]: I1126 17:23:41.426142 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" gracePeriod=600 Nov 26 17:23:41 crc kubenswrapper[5010]: E1126 17:23:41.557332 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:23:42 crc kubenswrapper[5010]: I1126 17:23:42.435085 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" exitCode=0 Nov 26 17:23:42 crc kubenswrapper[5010]: I1126 17:23:42.435160 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90"} Nov 26 17:23:42 crc kubenswrapper[5010]: I1126 17:23:42.435395 5010 scope.go:117] "RemoveContainer" containerID="ef98edb74c5bb6bcfce742aabe11d10cee533dd74639da58dff43195ce6da7ae" Nov 26 17:23:42 crc kubenswrapper[5010]: I1126 17:23:42.435924 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:23:42 crc kubenswrapper[5010]: E1126 17:23:42.436223 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:23:55 crc kubenswrapper[5010]: I1126 17:23:55.892411 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:23:55 crc kubenswrapper[5010]: E1126 17:23:55.893614 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:24:08 crc kubenswrapper[5010]: I1126 17:24:08.891888 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:24:08 crc kubenswrapper[5010]: E1126 17:24:08.892607 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:24:11 crc kubenswrapper[5010]: I1126 17:24:11.489224 5010 scope.go:117] "RemoveContainer" containerID="15984535f5b1be2bbdeeef33e6c11d7c02a603059c8c3f0d5a70e5c15ef05e3c" Nov 26 17:24:11 crc kubenswrapper[5010]: I1126 17:24:11.526193 5010 scope.go:117] "RemoveContainer" containerID="a203b4e22ad07366684f61421f94f25270925fa7a46a7b34a3385840e91e1a62" Nov 26 17:24:11 crc kubenswrapper[5010]: I1126 17:24:11.556145 5010 scope.go:117] "RemoveContainer" containerID="6449e48989147c3fe1745e269f01c58c095b0851a80395cd4e1ed29a381df9e6" Nov 26 17:24:11 crc kubenswrapper[5010]: I1126 17:24:11.591032 5010 scope.go:117] "RemoveContainer" containerID="279ba6ed73948cd811917f9b541a7801fad85f1fe12d899ef2cb006180895dc3" Nov 26 17:24:20 crc kubenswrapper[5010]: I1126 17:24:20.892242 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:24:20 crc kubenswrapper[5010]: E1126 17:24:20.893263 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:24:21 crc kubenswrapper[5010]: I1126 17:24:21.061822 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-2bc7-account-create-update-n6r49"] Nov 26 17:24:21 crc kubenswrapper[5010]: I1126 17:24:21.074107 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-nj5lp"] Nov 26 17:24:21 crc kubenswrapper[5010]: I1126 17:24:21.086074 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-2bc7-account-create-update-n6r49"] Nov 26 17:24:21 crc kubenswrapper[5010]: I1126 17:24:21.101912 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-nj5lp"] Nov 26 17:24:21 crc kubenswrapper[5010]: I1126 17:24:21.913953 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1875832b-7338-4ab0-bb6d-445884217d0e" path="/var/lib/kubelet/pods/1875832b-7338-4ab0-bb6d-445884217d0e/volumes" Nov 26 17:24:21 crc kubenswrapper[5010]: I1126 17:24:21.917146 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31f53da5-3952-4e07-8495-c864c959ac7d" path="/var/lib/kubelet/pods/31f53da5-3952-4e07-8495-c864c959ac7d/volumes" Nov 26 17:24:32 crc kubenswrapper[5010]: I1126 17:24:32.891383 5010 scope.go:117] "RemoveContainer" 
containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:24:32 crc kubenswrapper[5010]: E1126 17:24:32.892335 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:24:37 crc kubenswrapper[5010]: I1126 17:24:37.062823 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-8k2tw"] Nov 26 17:24:37 crc kubenswrapper[5010]: I1126 17:24:37.077066 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-8k2tw"] Nov 26 17:24:37 crc kubenswrapper[5010]: I1126 17:24:37.908738 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f1bd435-d206-440d-8054-83fe2688501a" path="/var/lib/kubelet/pods/0f1bd435-d206-440d-8054-83fe2688501a/volumes" Nov 26 17:24:47 crc kubenswrapper[5010]: I1126 17:24:47.892247 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:24:47 crc kubenswrapper[5010]: E1126 17:24:47.893327 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:25:02 crc kubenswrapper[5010]: I1126 17:25:02.892570 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:25:02 crc kubenswrapper[5010]: E1126 17:25:02.893475 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:25:11 crc kubenswrapper[5010]: I1126 17:25:11.672125 5010 scope.go:117] "RemoveContainer" containerID="f3fc7137ccf729cc44a262af335e9f6165e56831e915fcc52cb9fd5b5218216a" Nov 26 17:25:11 crc kubenswrapper[5010]: I1126 17:25:11.715101 5010 scope.go:117] "RemoveContainer" containerID="efddce0f7bba9050137d414ab8c3df851156bd00e8187e9d654f9e5f4a3b6987" Nov 26 17:25:11 crc kubenswrapper[5010]: I1126 17:25:11.763198 5010 scope.go:117] "RemoveContainer" containerID="b4199fd063c93076abb075ea068dd7d553b535286a3fbc7e2e601f09adad8acd" Nov 26 17:25:13 crc kubenswrapper[5010]: I1126 17:25:13.892201 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:25:13 crc kubenswrapper[5010]: E1126 17:25:13.893172 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:25:28 crc kubenswrapper[5010]: I1126 17:25:28.892501 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:25:28 crc kubenswrapper[5010]: E1126 17:25:28.893685 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:25:42 crc kubenswrapper[5010]: I1126 17:25:42.891249 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:25:42 crc kubenswrapper[5010]: E1126 17:25:42.891920 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:25:56 crc kubenswrapper[5010]: I1126 17:25:56.891851 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:25:56 crc kubenswrapper[5010]: E1126 17:25:56.892764 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.823413 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-dzfdl"] Nov 26 17:25:58 crc kubenswrapper[5010]: E1126 17:25:58.824222 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="extract-utilities" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.824239 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="extract-utilities" Nov 26 17:25:58 crc kubenswrapper[5010]: E1126 17:25:58.824291 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="extract-content" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.824300 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="extract-content" Nov 26 17:25:58 crc kubenswrapper[5010]: E1126 17:25:58.824314 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="registry-server" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.824324 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="registry-server" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.824573 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c917746-4faf-43f8-8d64-08b5520affad" containerName="registry-server" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.826672 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.848233 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dzfdl"] Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.919722 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-utilities\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.919879 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqmfz\" (UniqueName: \"kubernetes.io/projected/e3b38217-e900-42bf-8262-86d0dbd4c07c-kube-api-access-sqmfz\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:58 crc kubenswrapper[5010]: I1126 17:25:58.919949 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-catalog-content\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.021076 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-utilities\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.021197 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqmfz\" (UniqueName: \"kubernetes.io/projected/e3b38217-e900-42bf-8262-86d0dbd4c07c-kube-api-access-sqmfz\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.021262 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-catalog-content\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.021934 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-utilities\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.021977 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-catalog-content\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.040347 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqmfz\" (UniqueName: \"kubernetes.io/projected/e3b38217-e900-42bf-8262-86d0dbd4c07c-kube-api-access-sqmfz\") pod \"community-operators-dzfdl\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.165487 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:25:59 crc kubenswrapper[5010]: I1126 17:25:59.652683 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-dzfdl"] Nov 26 17:26:00 crc kubenswrapper[5010]: I1126 17:26:00.017020 5010 generic.go:334] "Generic (PLEG): container finished" podID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerID="87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576" exitCode=0 Nov 26 17:26:00 crc kubenswrapper[5010]: I1126 17:26:00.017100 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerDied","Data":"87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576"} Nov 26 17:26:00 crc kubenswrapper[5010]: I1126 17:26:00.017584 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerStarted","Data":"e219f09d027a0fb0adef80a64277506826e42d8969fb4bc74d1016693a591227"} Nov 26 17:26:02 crc kubenswrapper[5010]: I1126 17:26:02.043128 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerStarted","Data":"ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e"} Nov 26 17:26:03 crc kubenswrapper[5010]: I1126 17:26:03.054253 5010 generic.go:334] "Generic (PLEG): container finished" podID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerID="ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e" exitCode=0 Nov 26 17:26:03 crc kubenswrapper[5010]: I1126 17:26:03.054300 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerDied","Data":"ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e"} Nov 26 17:26:04 crc kubenswrapper[5010]: I1126 17:26:04.067636 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerStarted","Data":"45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35"} Nov 26 17:26:04 crc kubenswrapper[5010]: I1126 17:26:04.084158 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-dzfdl" podStartSLOduration=2.596451474 podStartE2EDuration="6.084138077s" podCreationTimestamp="2025-11-26 17:25:58 +0000 UTC" 
firstStartedPulling="2025-11-26 17:26:00.021471018 +0000 UTC m=+7180.812188166" lastFinishedPulling="2025-11-26 17:26:03.509157621 +0000 UTC m=+7184.299874769" observedRunningTime="2025-11-26 17:26:04.083248645 +0000 UTC m=+7184.873965793" watchObservedRunningTime="2025-11-26 17:26:04.084138077 +0000 UTC m=+7184.874855225" Nov 26 17:26:07 crc kubenswrapper[5010]: I1126 17:26:07.892522 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:26:07 crc kubenswrapper[5010]: E1126 17:26:07.893314 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:26:09 crc kubenswrapper[5010]: I1126 17:26:09.166010 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:26:09 crc kubenswrapper[5010]: I1126 17:26:09.167809 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:26:09 crc kubenswrapper[5010]: I1126 17:26:09.231123 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:26:10 crc kubenswrapper[5010]: I1126 17:26:10.188191 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:26:10 crc kubenswrapper[5010]: I1126 17:26:10.250243 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dzfdl"] Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.159897 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-dzfdl" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="registry-server" containerID="cri-o://45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35" gracePeriod=2 Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.683422 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.882787 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sqmfz\" (UniqueName: \"kubernetes.io/projected/e3b38217-e900-42bf-8262-86d0dbd4c07c-kube-api-access-sqmfz\") pod \"e3b38217-e900-42bf-8262-86d0dbd4c07c\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.882980 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-catalog-content\") pod \"e3b38217-e900-42bf-8262-86d0dbd4c07c\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.883151 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-utilities\") pod \"e3b38217-e900-42bf-8262-86d0dbd4c07c\" (UID: \"e3b38217-e900-42bf-8262-86d0dbd4c07c\") " Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.884082 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-utilities" (OuterVolumeSpecName: "utilities") pod "e3b38217-e900-42bf-8262-86d0dbd4c07c" (UID: "e3b38217-e900-42bf-8262-86d0dbd4c07c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.894003 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3b38217-e900-42bf-8262-86d0dbd4c07c-kube-api-access-sqmfz" (OuterVolumeSpecName: "kube-api-access-sqmfz") pod "e3b38217-e900-42bf-8262-86d0dbd4c07c" (UID: "e3b38217-e900-42bf-8262-86d0dbd4c07c"). InnerVolumeSpecName "kube-api-access-sqmfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.943894 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3b38217-e900-42bf-8262-86d0dbd4c07c" (UID: "e3b38217-e900-42bf-8262-86d0dbd4c07c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.986117 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sqmfz\" (UniqueName: \"kubernetes.io/projected/e3b38217-e900-42bf-8262-86d0dbd4c07c-kube-api-access-sqmfz\") on node \"crc\" DevicePath \"\"" Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.986143 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:26:12 crc kubenswrapper[5010]: I1126 17:26:12.986152 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3b38217-e900-42bf-8262-86d0dbd4c07c-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.170347 5010 generic.go:334] "Generic (PLEG): container finished" podID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerID="45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35" exitCode=0 Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.170395 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-dzfdl" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.170425 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerDied","Data":"45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35"} Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.172047 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-dzfdl" event={"ID":"e3b38217-e900-42bf-8262-86d0dbd4c07c","Type":"ContainerDied","Data":"e219f09d027a0fb0adef80a64277506826e42d8969fb4bc74d1016693a591227"} Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.172104 5010 scope.go:117] "RemoveContainer" containerID="45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.211639 5010 scope.go:117] "RemoveContainer" containerID="ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.252257 5010 scope.go:117] "RemoveContainer" containerID="87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.253908 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-dzfdl"] Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.270797 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-dzfdl"] Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.298778 5010 scope.go:117] "RemoveContainer" containerID="45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35" Nov 26 17:26:13 crc kubenswrapper[5010]: E1126 17:26:13.299302 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35\": container with ID starting with 45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35 not found: ID does not exist" containerID="45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.299359 
5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35"} err="failed to get container status \"45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35\": rpc error: code = NotFound desc = could not find container \"45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35\": container with ID starting with 45b3969e34950031bfd2fd4202200c4ecfca1afe259c38bcc112a4456f3fac35 not found: ID does not exist" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.299393 5010 scope.go:117] "RemoveContainer" containerID="ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e" Nov 26 17:26:13 crc kubenswrapper[5010]: E1126 17:26:13.299787 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e\": container with ID starting with ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e not found: ID does not exist" containerID="ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.299826 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e"} err="failed to get container status \"ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e\": rpc error: code = NotFound desc = could not find container \"ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e\": container with ID starting with ae763cef71affe680cef6b570833827874768b18d8f3e85cce800019f9fdcc3e not found: ID does not exist" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.299851 5010 scope.go:117] "RemoveContainer" containerID="87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576" Nov 26 17:26:13 crc kubenswrapper[5010]: E1126 17:26:13.300182 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576\": container with ID starting with 87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576 not found: ID does not exist" containerID="87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.300221 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576"} err="failed to get container status \"87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576\": rpc error: code = NotFound desc = could not find container \"87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576\": container with ID starting with 87de12068932ced7dcde07656c61256610a1b72488d9f348b24bc250d0b58576 not found: ID does not exist" Nov 26 17:26:13 crc kubenswrapper[5010]: I1126 17:26:13.924321 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" path="/var/lib/kubelet/pods/e3b38217-e900-42bf-8262-86d0dbd4c07c/volumes" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.350970 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jpwlc"] Nov 26 17:26:18 crc kubenswrapper[5010]: E1126 17:26:18.352168 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="registry-server" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.352190 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="registry-server" Nov 26 17:26:18 crc kubenswrapper[5010]: E1126 17:26:18.352211 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="extract-content" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.352262 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="extract-content" Nov 26 17:26:18 crc kubenswrapper[5010]: E1126 17:26:18.352285 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="extract-utilities" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.352294 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="extract-utilities" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.352553 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3b38217-e900-42bf-8262-86d0dbd4c07c" containerName="registry-server" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.354504 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.363171 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jpwlc"] Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.413574 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.413648 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-utilities\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.413693 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cmdm\" (UniqueName: \"kubernetes.io/projected/889825f0-2b51-4d9e-bb0c-832c706ebe9a-kube-api-access-6cmdm\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.523032 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.523161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-utilities\") pod \"redhat-operators-jpwlc\" (UID: 
\"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.523234 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cmdm\" (UniqueName: \"kubernetes.io/projected/889825f0-2b51-4d9e-bb0c-832c706ebe9a-kube-api-access-6cmdm\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.523747 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-utilities\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.523807 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.549769 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cmdm\" (UniqueName: \"kubernetes.io/projected/889825f0-2b51-4d9e-bb0c-832c706ebe9a-kube-api-access-6cmdm\") pod \"redhat-operators-jpwlc\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:18 crc kubenswrapper[5010]: I1126 17:26:18.704531 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:19 crc kubenswrapper[5010]: I1126 17:26:19.211365 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jpwlc"] Nov 26 17:26:19 crc kubenswrapper[5010]: I1126 17:26:19.232658 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerStarted","Data":"5120f9f0d359f1b1c75905c119accfc818d737ef880a31569306c5cd5662f6b8"} Nov 26 17:26:20 crc kubenswrapper[5010]: I1126 17:26:20.245198 5010 generic.go:334] "Generic (PLEG): container finished" podID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerID="629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a" exitCode=0 Nov 26 17:26:20 crc kubenswrapper[5010]: I1126 17:26:20.245256 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerDied","Data":"629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a"} Nov 26 17:26:21 crc kubenswrapper[5010]: I1126 17:26:21.258910 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerStarted","Data":"e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122"} Nov 26 17:26:21 crc kubenswrapper[5010]: I1126 17:26:21.892595 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:26:21 crc kubenswrapper[5010]: E1126 17:26:21.892999 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:26:26 crc kubenswrapper[5010]: I1126 17:26:26.310996 5010 generic.go:334] "Generic (PLEG): container finished" podID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerID="e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122" exitCode=0 Nov 26 17:26:26 crc kubenswrapper[5010]: I1126 17:26:26.311069 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerDied","Data":"e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122"} Nov 26 17:26:27 crc kubenswrapper[5010]: I1126 17:26:27.323097 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerStarted","Data":"11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533"} Nov 26 17:26:27 crc kubenswrapper[5010]: I1126 17:26:27.348905 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jpwlc" podStartSLOduration=2.80350176 podStartE2EDuration="9.348889632s" podCreationTimestamp="2025-11-26 17:26:18 +0000 UTC" firstStartedPulling="2025-11-26 17:26:20.247719722 +0000 UTC m=+7201.038436870" lastFinishedPulling="2025-11-26 17:26:26.793107584 +0000 UTC m=+7207.583824742" observedRunningTime="2025-11-26 17:26:27.341271777 +0000 UTC m=+7208.131988925" watchObservedRunningTime="2025-11-26 17:26:27.348889632 +0000 UTC m=+7208.139606780" Nov 26 17:26:28 crc kubenswrapper[5010]: I1126 17:26:28.705643 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:28 crc kubenswrapper[5010]: I1126 17:26:28.705988 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:29 crc kubenswrapper[5010]: I1126 17:26:29.756637 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jpwlc" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="registry-server" probeResult="failure" output=< Nov 26 17:26:29 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:26:29 crc kubenswrapper[5010]: > Nov 26 17:26:32 crc kubenswrapper[5010]: I1126 17:26:32.891733 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:26:32 crc kubenswrapper[5010]: E1126 17:26:32.892660 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:26:38 crc kubenswrapper[5010]: I1126 17:26:38.784632 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:38 crc 
kubenswrapper[5010]: I1126 17:26:38.879507 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:39 crc kubenswrapper[5010]: I1126 17:26:39.038492 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jpwlc"] Nov 26 17:26:40 crc kubenswrapper[5010]: I1126 17:26:40.454361 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jpwlc" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="registry-server" containerID="cri-o://11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533" gracePeriod=2 Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.401104 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.464208 5010 generic.go:334] "Generic (PLEG): container finished" podID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerID="11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533" exitCode=0 Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.464267 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerDied","Data":"11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533"} Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.464309 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jpwlc" event={"ID":"889825f0-2b51-4d9e-bb0c-832c706ebe9a","Type":"ContainerDied","Data":"5120f9f0d359f1b1c75905c119accfc818d737ef880a31569306c5cd5662f6b8"} Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.464339 5010 scope.go:117] "RemoveContainer" containerID="11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.464958 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jpwlc" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.485297 5010 scope.go:117] "RemoveContainer" containerID="e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.503951 5010 scope.go:117] "RemoveContainer" containerID="629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.530664 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-utilities\") pod \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.530765 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cmdm\" (UniqueName: \"kubernetes.io/projected/889825f0-2b51-4d9e-bb0c-832c706ebe9a-kube-api-access-6cmdm\") pod \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.530926 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content\") pod \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.531435 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-utilities" (OuterVolumeSpecName: "utilities") pod "889825f0-2b51-4d9e-bb0c-832c706ebe9a" (UID: "889825f0-2b51-4d9e-bb0c-832c706ebe9a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.539108 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/889825f0-2b51-4d9e-bb0c-832c706ebe9a-kube-api-access-6cmdm" (OuterVolumeSpecName: "kube-api-access-6cmdm") pod "889825f0-2b51-4d9e-bb0c-832c706ebe9a" (UID: "889825f0-2b51-4d9e-bb0c-832c706ebe9a"). InnerVolumeSpecName "kube-api-access-6cmdm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.628506 5010 scope.go:117] "RemoveContainer" containerID="11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533" Nov 26 17:26:41 crc kubenswrapper[5010]: E1126 17:26:41.629211 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533\": container with ID starting with 11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533 not found: ID does not exist" containerID="11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.629289 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533"} err="failed to get container status \"11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533\": rpc error: code = NotFound desc = could not find container \"11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533\": container with ID starting with 11dc947f336d629c6707d78e717d10f2f59edc51fe9746ec5b91f194ef271533 not found: ID does not exist" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.629338 5010 scope.go:117] "RemoveContainer" containerID="e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122" Nov 26 17:26:41 crc kubenswrapper[5010]: E1126 17:26:41.630054 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122\": container with ID starting with e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122 not found: ID does not exist" containerID="e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.630108 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122"} err="failed to get container status \"e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122\": rpc error: code = NotFound desc = could not find container \"e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122\": container with ID starting with e7b4256b9e2b9ac48bd80653f8ab33c0dcd2b0b39f921413097a743dc6b5e122 not found: ID does not exist" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.630146 5010 scope.go:117] "RemoveContainer" containerID="629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a" Nov 26 17:26:41 crc kubenswrapper[5010]: E1126 17:26:41.630564 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a\": container with ID starting with 629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a not found: ID does not exist" containerID="629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.630607 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a"} err="failed to get container status \"629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a\": rpc error: code = NotFound desc = could not 
find container \"629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a\": container with ID starting with 629d87faf6e1f354a51ff7897a0b4e08cce14a9d45a654752cb2cc4d6388931a not found: ID does not exist" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.632336 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "889825f0-2b51-4d9e-bb0c-832c706ebe9a" (UID: "889825f0-2b51-4d9e-bb0c-832c706ebe9a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.632493 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content\") pod \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\" (UID: \"889825f0-2b51-4d9e-bb0c-832c706ebe9a\") " Nov 26 17:26:41 crc kubenswrapper[5010]: W1126 17:26:41.632681 5010 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/889825f0-2b51-4d9e-bb0c-832c706ebe9a/volumes/kubernetes.io~empty-dir/catalog-content Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.632817 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "889825f0-2b51-4d9e-bb0c-832c706ebe9a" (UID: "889825f0-2b51-4d9e-bb0c-832c706ebe9a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.633368 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.633393 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cmdm\" (UniqueName: \"kubernetes.io/projected/889825f0-2b51-4d9e-bb0c-832c706ebe9a-kube-api-access-6cmdm\") on node \"crc\" DevicePath \"\"" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.633433 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/889825f0-2b51-4d9e-bb0c-832c706ebe9a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.806606 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jpwlc"] Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.814446 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jpwlc"] Nov 26 17:26:41 crc kubenswrapper[5010]: I1126 17:26:41.904465 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" path="/var/lib/kubelet/pods/889825f0-2b51-4d9e-bb0c-832c706ebe9a/volumes" Nov 26 17:26:45 crc kubenswrapper[5010]: I1126 17:26:45.891997 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:26:45 crc kubenswrapper[5010]: E1126 17:26:45.892956 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:26:56 crc kubenswrapper[5010]: I1126 17:26:56.892049 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:26:56 crc kubenswrapper[5010]: E1126 17:26:56.894527 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:27:11 crc kubenswrapper[5010]: I1126 17:27:11.892397 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:27:11 crc kubenswrapper[5010]: E1126 17:27:11.893393 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:27:25 crc kubenswrapper[5010]: I1126 17:27:25.018657 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:27:25 crc kubenswrapper[5010]: E1126 17:27:25.020995 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:27:32 crc kubenswrapper[5010]: I1126 17:27:32.055287 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-e014-account-create-update-gcfkg"] Nov 26 17:27:32 crc kubenswrapper[5010]: I1126 17:27:32.069177 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-kkbjt"] Nov 26 17:27:32 crc kubenswrapper[5010]: I1126 17:27:32.079869 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-e014-account-create-update-gcfkg"] Nov 26 17:27:32 crc kubenswrapper[5010]: I1126 17:27:32.088424 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-kkbjt"] Nov 26 17:27:33 crc kubenswrapper[5010]: I1126 17:27:33.908408 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6697bc34-58f2-4daf-b940-1d78a44566e4" path="/var/lib/kubelet/pods/6697bc34-58f2-4daf-b940-1d78a44566e4/volumes" Nov 26 17:27:33 crc kubenswrapper[5010]: I1126 17:27:33.909685 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83d1625c-1b09-40b6-8c61-dd86d17becf1" path="/var/lib/kubelet/pods/83d1625c-1b09-40b6-8c61-dd86d17becf1/volumes" Nov 26 17:27:38 crc kubenswrapper[5010]: I1126 17:27:38.891685 5010 scope.go:117] "RemoveContainer" 
containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:27:38 crc kubenswrapper[5010]: E1126 17:27:38.892636 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:27:44 crc kubenswrapper[5010]: I1126 17:27:44.052235 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-rvmvt"] Nov 26 17:27:44 crc kubenswrapper[5010]: I1126 17:27:44.066859 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-rvmvt"] Nov 26 17:27:45 crc kubenswrapper[5010]: I1126 17:27:45.911688 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="642a032a-cc39-475a-87b2-0d5d25c24b04" path="/var/lib/kubelet/pods/642a032a-cc39-475a-87b2-0d5d25c24b04/volumes" Nov 26 17:27:53 crc kubenswrapper[5010]: I1126 17:27:53.892614 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:27:53 crc kubenswrapper[5010]: E1126 17:27:53.893453 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:28:05 crc kubenswrapper[5010]: I1126 17:28:05.892264 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:28:05 crc kubenswrapper[5010]: E1126 17:28:05.893024 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:28:11 crc kubenswrapper[5010]: I1126 17:28:11.990444 5010 scope.go:117] "RemoveContainer" containerID="664c8520987d7ea21f3cd43367fe35690af16d17ac311f390b0db78e51cc475f" Nov 26 17:28:12 crc kubenswrapper[5010]: I1126 17:28:12.042532 5010 scope.go:117] "RemoveContainer" containerID="905a06598263d807dec2785e9f03a9e51270d15ca49adda007cc6497e3aec5af" Nov 26 17:28:12 crc kubenswrapper[5010]: I1126 17:28:12.085628 5010 scope.go:117] "RemoveContainer" containerID="4e00a440f8277f4633ed7bf67fd56c380b77c9d990870016072e44a7b68c2458" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.776109 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:28:18 crc kubenswrapper[5010]: E1126 17:28:18.777899 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="extract-content" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.777934 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" 
containerName="extract-content" Nov 26 17:28:18 crc kubenswrapper[5010]: E1126 17:28:18.778025 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="extract-utilities" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.778046 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="extract-utilities" Nov 26 17:28:18 crc kubenswrapper[5010]: E1126 17:28:18.778103 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="registry-server" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.778121 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="registry-server" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.778670 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="889825f0-2b51-4d9e-bb0c-832c706ebe9a" containerName="registry-server" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.783045 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.796757 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.894005 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:28:18 crc kubenswrapper[5010]: E1126 17:28:18.894275 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.985096 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59db6\" (UniqueName: \"kubernetes.io/projected/a53dffa9-5f15-4495-a017-c496e0218280-kube-api-access-59db6\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.985692 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-utilities\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:18 crc kubenswrapper[5010]: I1126 17:28:18.986474 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-catalog-content\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.088804 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-catalog-content\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.089281 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-catalog-content\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.089319 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59db6\" (UniqueName: \"kubernetes.io/projected/a53dffa9-5f15-4495-a017-c496e0218280-kube-api-access-59db6\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.089408 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-utilities\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.089819 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-utilities\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.109240 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59db6\" (UniqueName: \"kubernetes.io/projected/a53dffa9-5f15-4495-a017-c496e0218280-kube-api-access-59db6\") pod \"certified-operators-c8fm9\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.135437 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.663889 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:28:19 crc kubenswrapper[5010]: I1126 17:28:19.690540 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerStarted","Data":"ec8861d86f9ad49c771244ef1c09fa3d1f4e1505742e92e4f1225f780132e27f"} Nov 26 17:28:20 crc kubenswrapper[5010]: I1126 17:28:20.704546 5010 generic.go:334] "Generic (PLEG): container finished" podID="a53dffa9-5f15-4495-a017-c496e0218280" containerID="034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355" exitCode=0 Nov 26 17:28:20 crc kubenswrapper[5010]: I1126 17:28:20.704633 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerDied","Data":"034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355"} Nov 26 17:28:20 crc kubenswrapper[5010]: I1126 17:28:20.708068 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:28:25 crc kubenswrapper[5010]: I1126 17:28:25.756858 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerStarted","Data":"bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58"} Nov 26 17:28:26 crc kubenswrapper[5010]: I1126 17:28:26.773480 5010 generic.go:334] "Generic (PLEG): container finished" podID="a53dffa9-5f15-4495-a017-c496e0218280" containerID="bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58" exitCode=0 Nov 26 17:28:26 crc kubenswrapper[5010]: I1126 17:28:26.773530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerDied","Data":"bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58"} Nov 26 17:28:27 crc kubenswrapper[5010]: I1126 17:28:27.799068 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerStarted","Data":"1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46"} Nov 26 17:28:27 crc kubenswrapper[5010]: I1126 17:28:27.827147 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c8fm9" podStartSLOduration=3.056371688 podStartE2EDuration="9.827129016s" podCreationTimestamp="2025-11-26 17:28:18 +0000 UTC" firstStartedPulling="2025-11-26 17:28:20.707373483 +0000 UTC m=+7321.498090671" lastFinishedPulling="2025-11-26 17:28:27.478130811 +0000 UTC m=+7328.268847999" observedRunningTime="2025-11-26 17:28:27.819360197 +0000 UTC m=+7328.610077355" watchObservedRunningTime="2025-11-26 17:28:27.827129016 +0000 UTC m=+7328.617846164" Nov 26 17:28:29 crc kubenswrapper[5010]: I1126 17:28:29.136460 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:29 crc kubenswrapper[5010]: I1126 17:28:29.136952 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:29 crc kubenswrapper[5010]: E1126 17:28:29.783943 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-conmon-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:28:30 crc kubenswrapper[5010]: I1126 17:28:30.189800 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-c8fm9" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="registry-server" probeResult="failure" output=< Nov 26 17:28:30 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:28:30 crc kubenswrapper[5010]: > Nov 26 17:28:30 crc kubenswrapper[5010]: I1126 17:28:30.891937 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:28:30 crc kubenswrapper[5010]: E1126 17:28:30.892306 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.212677 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.305049 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.438091 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.495441 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.495835 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-prr7m" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="registry-server" containerID="cri-o://4c3aa545b2d5ef30b93075365cc32f724dc2f50ebc1de478ed7eff8ba11435c9" gracePeriod=2 Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.991501 5010 generic.go:334] "Generic (PLEG): container finished" podID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerID="4c3aa545b2d5ef30b93075365cc32f724dc2f50ebc1de478ed7eff8ba11435c9" exitCode=0 Nov 26 17:28:39 crc kubenswrapper[5010]: I1126 17:28:39.991540 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prr7m" event={"ID":"8d2e746f-8a21-453a-b29f-db02c74e06d8","Type":"ContainerDied","Data":"4c3aa545b2d5ef30b93075365cc32f724dc2f50ebc1de478ed7eff8ba11435c9"} Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.077654 5010 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 17:28:40 crc kubenswrapper[5010]: E1126 17:28:40.172645 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-conmon-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.227443 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-utilities\") pod \"8d2e746f-8a21-453a-b29f-db02c74e06d8\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.227484 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-chv8j\" (UniqueName: \"kubernetes.io/projected/8d2e746f-8a21-453a-b29f-db02c74e06d8-kube-api-access-chv8j\") pod \"8d2e746f-8a21-453a-b29f-db02c74e06d8\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.227607 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-catalog-content\") pod \"8d2e746f-8a21-453a-b29f-db02c74e06d8\" (UID: \"8d2e746f-8a21-453a-b29f-db02c74e06d8\") " Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.227791 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-utilities" (OuterVolumeSpecName: "utilities") pod "8d2e746f-8a21-453a-b29f-db02c74e06d8" (UID: "8d2e746f-8a21-453a-b29f-db02c74e06d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.228161 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.233178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d2e746f-8a21-453a-b29f-db02c74e06d8-kube-api-access-chv8j" (OuterVolumeSpecName: "kube-api-access-chv8j") pod "8d2e746f-8a21-453a-b29f-db02c74e06d8" (UID: "8d2e746f-8a21-453a-b29f-db02c74e06d8"). InnerVolumeSpecName "kube-api-access-chv8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.285083 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d2e746f-8a21-453a-b29f-db02c74e06d8" (UID: "8d2e746f-8a21-453a-b29f-db02c74e06d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.330990 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-chv8j\" (UniqueName: \"kubernetes.io/projected/8d2e746f-8a21-453a-b29f-db02c74e06d8-kube-api-access-chv8j\") on node \"crc\" DevicePath \"\"" Nov 26 17:28:40 crc kubenswrapper[5010]: I1126 17:28:40.331065 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d2e746f-8a21-453a-b29f-db02c74e06d8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.005593 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-prr7m" event={"ID":"8d2e746f-8a21-453a-b29f-db02c74e06d8","Type":"ContainerDied","Data":"6f0b785ad007ecfe196950fcc5c3dceb5d90484cd91dd1e9204f83ec7042b865"} Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.005665 5010 scope.go:117] "RemoveContainer" containerID="4c3aa545b2d5ef30b93075365cc32f724dc2f50ebc1de478ed7eff8ba11435c9" Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.005977 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-prr7m" Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.035321 5010 scope.go:117] "RemoveContainer" containerID="5155b8a6186109f594e7a6341cfdfa07eb32409b5aaf43a9c08a1bea3cad00dc" Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.060961 5010 scope.go:117] "RemoveContainer" containerID="3030a9e573d20520dfce3be890de502d41d9416cfc7f85653432ba829427304d" Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.070571 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.080052 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-prr7m"] Nov 26 17:28:41 crc kubenswrapper[5010]: I1126 17:28:41.917106 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" path="/var/lib/kubelet/pods/8d2e746f-8a21-453a-b29f-db02c74e06d8/volumes" Nov 26 17:28:45 crc kubenswrapper[5010]: I1126 17:28:45.893805 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:28:47 crc kubenswrapper[5010]: I1126 17:28:47.108845 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"7db55c6c053150d443cfe1b3eed0acf45eccd3a1c48db253581be4c168619c6b"} Nov 26 17:28:50 crc kubenswrapper[5010]: E1126 17:28:50.440378 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-conmon-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:29:00 crc kubenswrapper[5010]: E1126 17:29:00.740909 5010 cadvisor_stats_provider.go:516] "Partial failure 
issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-conmon-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:29:11 crc kubenswrapper[5010]: E1126 17:29:11.039313 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda53dffa9_5f15_4495_a017_c496e0218280.slice/crio-conmon-034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.173617 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9"] Nov 26 17:30:00 crc kubenswrapper[5010]: E1126 17:30:00.174824 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="registry-server" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.174843 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="registry-server" Nov 26 17:30:00 crc kubenswrapper[5010]: E1126 17:30:00.174894 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="extract-utilities" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.174903 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="extract-utilities" Nov 26 17:30:00 crc kubenswrapper[5010]: E1126 17:30:00.174925 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="extract-content" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.174931 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="extract-content" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.175163 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d2e746f-8a21-453a-b29f-db02c74e06d8" containerName="registry-server" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.175937 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.178447 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.179069 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.207745 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9"] Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.279231 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-config-volume\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.279854 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tnq6k\" (UniqueName: \"kubernetes.io/projected/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-kube-api-access-tnq6k\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.279943 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-secret-volume\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.381801 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tnq6k\" (UniqueName: \"kubernetes.io/projected/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-kube-api-access-tnq6k\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.381863 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-secret-volume\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.381886 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-config-volume\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.383022 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-config-volume\") pod 
\"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.390277 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-secret-volume\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.402400 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tnq6k\" (UniqueName: \"kubernetes.io/projected/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-kube-api-access-tnq6k\") pod \"collect-profiles-29402970-p49p9\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:00 crc kubenswrapper[5010]: I1126 17:30:00.512870 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:01 crc kubenswrapper[5010]: I1126 17:30:01.020687 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9"] Nov 26 17:30:01 crc kubenswrapper[5010]: W1126 17:30:01.021954 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod49347c8e_e9fc_4a14_ba7a_19f5d2401d43.slice/crio-9297e39f4c280ea98aa675070458a786fef77e237f6ddca148db3afe1015cd41 WatchSource:0}: Error finding container 9297e39f4c280ea98aa675070458a786fef77e237f6ddca148db3afe1015cd41: Status 404 returned error can't find the container with id 9297e39f4c280ea98aa675070458a786fef77e237f6ddca148db3afe1015cd41 Nov 26 17:30:01 crc kubenswrapper[5010]: I1126 17:30:01.960366 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" event={"ID":"49347c8e-e9fc-4a14-ba7a-19f5d2401d43","Type":"ContainerStarted","Data":"483128961b2824cebffbca5da39bdba86d047947402217024418b48cbc18856c"} Nov 26 17:30:01 crc kubenswrapper[5010]: I1126 17:30:01.960611 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" event={"ID":"49347c8e-e9fc-4a14-ba7a-19f5d2401d43","Type":"ContainerStarted","Data":"9297e39f4c280ea98aa675070458a786fef77e237f6ddca148db3afe1015cd41"} Nov 26 17:30:01 crc kubenswrapper[5010]: I1126 17:30:01.982567 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" podStartSLOduration=1.9825465599999998 podStartE2EDuration="1.98254656s" podCreationTimestamp="2025-11-26 17:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 17:30:01.979146848 +0000 UTC m=+7422.769863996" watchObservedRunningTime="2025-11-26 17:30:01.98254656 +0000 UTC m=+7422.773263718" Nov 26 17:30:02 crc kubenswrapper[5010]: I1126 17:30:02.973406 5010 generic.go:334] "Generic (PLEG): container finished" podID="49347c8e-e9fc-4a14-ba7a-19f5d2401d43" containerID="483128961b2824cebffbca5da39bdba86d047947402217024418b48cbc18856c" exitCode=0 Nov 26 17:30:02 crc kubenswrapper[5010]: I1126 
17:30:02.973472 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" event={"ID":"49347c8e-e9fc-4a14-ba7a-19f5d2401d43","Type":"ContainerDied","Data":"483128961b2824cebffbca5da39bdba86d047947402217024418b48cbc18856c"} Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.343907 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.472925 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-secret-volume\") pod \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.473128 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tnq6k\" (UniqueName: \"kubernetes.io/projected/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-kube-api-access-tnq6k\") pod \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.473227 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-config-volume\") pod \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\" (UID: \"49347c8e-e9fc-4a14-ba7a-19f5d2401d43\") " Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.474060 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-config-volume" (OuterVolumeSpecName: "config-volume") pod "49347c8e-e9fc-4a14-ba7a-19f5d2401d43" (UID: "49347c8e-e9fc-4a14-ba7a-19f5d2401d43"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.479354 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "49347c8e-e9fc-4a14-ba7a-19f5d2401d43" (UID: "49347c8e-e9fc-4a14-ba7a-19f5d2401d43"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.480001 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-kube-api-access-tnq6k" (OuterVolumeSpecName: "kube-api-access-tnq6k") pod "49347c8e-e9fc-4a14-ba7a-19f5d2401d43" (UID: "49347c8e-e9fc-4a14-ba7a-19f5d2401d43"). InnerVolumeSpecName "kube-api-access-tnq6k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.575920 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tnq6k\" (UniqueName: \"kubernetes.io/projected/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-kube-api-access-tnq6k\") on node \"crc\" DevicePath \"\"" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.575955 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.575964 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49347c8e-e9fc-4a14-ba7a-19f5d2401d43-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.997970 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" event={"ID":"49347c8e-e9fc-4a14-ba7a-19f5d2401d43","Type":"ContainerDied","Data":"9297e39f4c280ea98aa675070458a786fef77e237f6ddca148db3afe1015cd41"} Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.998018 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9297e39f4c280ea98aa675070458a786fef77e237f6ddca148db3afe1015cd41" Nov 26 17:30:04 crc kubenswrapper[5010]: I1126 17:30:04.998062 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9" Nov 26 17:30:05 crc kubenswrapper[5010]: I1126 17:30:05.052338 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj"] Nov 26 17:30:05 crc kubenswrapper[5010]: I1126 17:30:05.061286 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402925-4qnzj"] Nov 26 17:30:05 crc kubenswrapper[5010]: I1126 17:30:05.912295 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cb3e510-06f9-4bf3-9b5c-382312c3b4fe" path="/var/lib/kubelet/pods/1cb3e510-06f9-4bf3-9b5c-382312c3b4fe/volumes" Nov 26 17:30:12 crc kubenswrapper[5010]: I1126 17:30:12.307820 5010 scope.go:117] "RemoveContainer" containerID="13bae8e2b7d0ae7e48b49845a8a5ebc69ac26eeeabbe26448bb9cfce815219c1" Nov 26 17:31:11 crc kubenswrapper[5010]: I1126 17:31:11.423422 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:31:11 crc kubenswrapper[5010]: I1126 17:31:11.424212 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:31:41 crc kubenswrapper[5010]: I1126 17:31:41.422603 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Nov 26 17:31:41 crc kubenswrapper[5010]: I1126 17:31:41.423514 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.503759 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kzf7g"] Nov 26 17:32:02 crc kubenswrapper[5010]: E1126 17:32:02.504879 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49347c8e-e9fc-4a14-ba7a-19f5d2401d43" containerName="collect-profiles" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.504899 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="49347c8e-e9fc-4a14-ba7a-19f5d2401d43" containerName="collect-profiles" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.505182 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="49347c8e-e9fc-4a14-ba7a-19f5d2401d43" containerName="collect-profiles" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.508186 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.531857 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzf7g"] Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.667654 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-catalog-content\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.667743 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpqmj\" (UniqueName: \"kubernetes.io/projected/f0241b04-bc4b-4d79-844c-c9a79f5325f5-kube-api-access-xpqmj\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.667782 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-utilities\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.769511 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpqmj\" (UniqueName: \"kubernetes.io/projected/f0241b04-bc4b-4d79-844c-c9a79f5325f5-kube-api-access-xpqmj\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.769582 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-utilities\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " 
pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.769799 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-catalog-content\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.770323 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-catalog-content\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.770679 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-utilities\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.795084 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpqmj\" (UniqueName: \"kubernetes.io/projected/f0241b04-bc4b-4d79-844c-c9a79f5325f5-kube-api-access-xpqmj\") pod \"redhat-marketplace-kzf7g\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:02 crc kubenswrapper[5010]: I1126 17:32:02.846221 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:03 crc kubenswrapper[5010]: I1126 17:32:03.412162 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzf7g"] Nov 26 17:32:03 crc kubenswrapper[5010]: W1126 17:32:03.412558 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf0241b04_bc4b_4d79_844c_c9a79f5325f5.slice/crio-d6e3413de2395cb09cfc0a35ef031b0ecd5d31f374c98b00b06f67353966bd48 WatchSource:0}: Error finding container d6e3413de2395cb09cfc0a35ef031b0ecd5d31f374c98b00b06f67353966bd48: Status 404 returned error can't find the container with id d6e3413de2395cb09cfc0a35ef031b0ecd5d31f374c98b00b06f67353966bd48 Nov 26 17:32:03 crc kubenswrapper[5010]: I1126 17:32:03.555362 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerStarted","Data":"d6e3413de2395cb09cfc0a35ef031b0ecd5d31f374c98b00b06f67353966bd48"} Nov 26 17:32:04 crc kubenswrapper[5010]: I1126 17:32:04.568671 5010 generic.go:334] "Generic (PLEG): container finished" podID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerID="af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846" exitCode=0 Nov 26 17:32:04 crc kubenswrapper[5010]: I1126 17:32:04.568797 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerDied","Data":"af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846"} Nov 26 17:32:06 crc kubenswrapper[5010]: I1126 17:32:06.591770 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerStarted","Data":"38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77"} Nov 26 17:32:07 crc kubenswrapper[5010]: I1126 17:32:07.604537 5010 generic.go:334] "Generic (PLEG): container finished" podID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerID="38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77" exitCode=0 Nov 26 17:32:07 crc kubenswrapper[5010]: I1126 17:32:07.604679 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerDied","Data":"38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77"} Nov 26 17:32:09 crc kubenswrapper[5010]: I1126 17:32:09.627007 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerStarted","Data":"6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597"} Nov 26 17:32:09 crc kubenswrapper[5010]: I1126 17:32:09.649830 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kzf7g" podStartSLOduration=3.79896376 podStartE2EDuration="7.649799403s" podCreationTimestamp="2025-11-26 17:32:02 +0000 UTC" firstStartedPulling="2025-11-26 17:32:04.571137984 +0000 UTC m=+7545.361855172" lastFinishedPulling="2025-11-26 17:32:08.421973657 +0000 UTC m=+7549.212690815" observedRunningTime="2025-11-26 17:32:09.645471148 +0000 UTC m=+7550.436188296" watchObservedRunningTime="2025-11-26 17:32:09.649799403 +0000 UTC m=+7550.440516611" Nov 26 17:32:11 crc kubenswrapper[5010]: I1126 17:32:11.422745 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:32:11 crc kubenswrapper[5010]: I1126 17:32:11.423063 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:32:11 crc kubenswrapper[5010]: I1126 17:32:11.423111 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:32:11 crc kubenswrapper[5010]: I1126 17:32:11.423941 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7db55c6c053150d443cfe1b3eed0acf45eccd3a1c48db253581be4c168619c6b"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:32:11 crc kubenswrapper[5010]: I1126 17:32:11.423998 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://7db55c6c053150d443cfe1b3eed0acf45eccd3a1c48db253581be4c168619c6b" gracePeriod=600 Nov 26 17:32:12 crc 
kubenswrapper[5010]: I1126 17:32:12.662090 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="7db55c6c053150d443cfe1b3eed0acf45eccd3a1c48db253581be4c168619c6b" exitCode=0 Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.662160 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"7db55c6c053150d443cfe1b3eed0acf45eccd3a1c48db253581be4c168619c6b"} Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.662557 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec"} Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.662579 5010 scope.go:117] "RemoveContainer" containerID="c9400bde466b60077b27405e11a70351d2a04bd1fac61977871b31666f8d6c90" Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.676507 5010 generic.go:334] "Generic (PLEG): container finished" podID="484ffc98-a27e-4fc3-9fb9-70c960bd0699" containerID="5b9d6224840167d0c4ce1c55781d0894844fea51226289dcf1a3950705dfbacf" exitCode=0 Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.676561 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" event={"ID":"484ffc98-a27e-4fc3-9fb9-70c960bd0699","Type":"ContainerDied","Data":"5b9d6224840167d0c4ce1c55781d0894844fea51226289dcf1a3950705dfbacf"} Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.847366 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.847447 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:12 crc kubenswrapper[5010]: I1126 17:32:12.911930 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:13 crc kubenswrapper[5010]: I1126 17:32:13.781506 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:13 crc kubenswrapper[5010]: I1126 17:32:13.834841 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzf7g"] Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.391953 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.553617 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-inventory\") pod \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.554005 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw6fz\" (UniqueName: \"kubernetes.io/projected/484ffc98-a27e-4fc3-9fb9-70c960bd0699-kube-api-access-gw6fz\") pod \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.554156 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-ssh-key\") pod \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.554314 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-tripleo-cleanup-combined-ca-bundle\") pod \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\" (UID: \"484ffc98-a27e-4fc3-9fb9-70c960bd0699\") " Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.566320 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/484ffc98-a27e-4fc3-9fb9-70c960bd0699-kube-api-access-gw6fz" (OuterVolumeSpecName: "kube-api-access-gw6fz") pod "484ffc98-a27e-4fc3-9fb9-70c960bd0699" (UID: "484ffc98-a27e-4fc3-9fb9-70c960bd0699"). InnerVolumeSpecName "kube-api-access-gw6fz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.567288 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-tripleo-cleanup-combined-ca-bundle" (OuterVolumeSpecName: "tripleo-cleanup-combined-ca-bundle") pod "484ffc98-a27e-4fc3-9fb9-70c960bd0699" (UID: "484ffc98-a27e-4fc3-9fb9-70c960bd0699"). InnerVolumeSpecName "tripleo-cleanup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.589087 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-inventory" (OuterVolumeSpecName: "inventory") pod "484ffc98-a27e-4fc3-9fb9-70c960bd0699" (UID: "484ffc98-a27e-4fc3-9fb9-70c960bd0699"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.601270 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "484ffc98-a27e-4fc3-9fb9-70c960bd0699" (UID: "484ffc98-a27e-4fc3-9fb9-70c960bd0699"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.656640 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.656678 5010 reconciler_common.go:293] "Volume detached for volume \"tripleo-cleanup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-tripleo-cleanup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.656693 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/484ffc98-a27e-4fc3-9fb9-70c960bd0699-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.656704 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw6fz\" (UniqueName: \"kubernetes.io/projected/484ffc98-a27e-4fc3-9fb9-70c960bd0699-kube-api-access-gw6fz\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.727024 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.728041 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm" event={"ID":"484ffc98-a27e-4fc3-9fb9-70c960bd0699","Type":"ContainerDied","Data":"b0ce9f13b4c33e358b14cdb0f3b54eea90b775ed2ff581adbe1e4d84ecd0b62a"} Nov 26 17:32:14 crc kubenswrapper[5010]: I1126 17:32:14.728085 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0ce9f13b4c33e358b14cdb0f3b54eea90b775ed2ff581adbe1e4d84ecd0b62a" Nov 26 17:32:15 crc kubenswrapper[5010]: I1126 17:32:15.739473 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kzf7g" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="registry-server" containerID="cri-o://6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597" gracePeriod=2 Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.260599 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.396452 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-catalog-content\") pod \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.396612 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpqmj\" (UniqueName: \"kubernetes.io/projected/f0241b04-bc4b-4d79-844c-c9a79f5325f5-kube-api-access-xpqmj\") pod \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.396826 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-utilities\") pod \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\" (UID: \"f0241b04-bc4b-4d79-844c-c9a79f5325f5\") " Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.398259 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-utilities" (OuterVolumeSpecName: "utilities") pod "f0241b04-bc4b-4d79-844c-c9a79f5325f5" (UID: "f0241b04-bc4b-4d79-844c-c9a79f5325f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.405814 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0241b04-bc4b-4d79-844c-c9a79f5325f5-kube-api-access-xpqmj" (OuterVolumeSpecName: "kube-api-access-xpqmj") pod "f0241b04-bc4b-4d79-844c-c9a79f5325f5" (UID: "f0241b04-bc4b-4d79-844c-c9a79f5325f5"). InnerVolumeSpecName "kube-api-access-xpqmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.416555 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f0241b04-bc4b-4d79-844c-c9a79f5325f5" (UID: "f0241b04-bc4b-4d79-844c-c9a79f5325f5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.499699 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.499888 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpqmj\" (UniqueName: \"kubernetes.io/projected/f0241b04-bc4b-4d79-844c-c9a79f5325f5-kube-api-access-xpqmj\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.499947 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0241b04-bc4b-4d79-844c-c9a79f5325f5-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.755365 5010 generic.go:334] "Generic (PLEG): container finished" podID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerID="6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597" exitCode=0 Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.755763 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerDied","Data":"6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597"} Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.755815 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzf7g" event={"ID":"f0241b04-bc4b-4d79-844c-c9a79f5325f5","Type":"ContainerDied","Data":"d6e3413de2395cb09cfc0a35ef031b0ecd5d31f374c98b00b06f67353966bd48"} Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.755836 5010 scope.go:117] "RemoveContainer" containerID="6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.756062 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzf7g" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.795780 5010 scope.go:117] "RemoveContainer" containerID="38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.799015 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzf7g"] Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.815243 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzf7g"] Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.821949 5010 scope.go:117] "RemoveContainer" containerID="af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.884695 5010 scope.go:117] "RemoveContainer" containerID="6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597" Nov 26 17:32:16 crc kubenswrapper[5010]: E1126 17:32:16.885277 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597\": container with ID starting with 6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597 not found: ID does not exist" containerID="6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.885399 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597"} err="failed to get container status \"6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597\": rpc error: code = NotFound desc = could not find container \"6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597\": container with ID starting with 6257e67cb7cdb2a7b002cfb6be73ffc4de358f7efdc984c3f6d27ae08d68a597 not found: ID does not exist" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.885431 5010 scope.go:117] "RemoveContainer" containerID="38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77" Nov 26 17:32:16 crc kubenswrapper[5010]: E1126 17:32:16.886228 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77\": container with ID starting with 38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77 not found: ID does not exist" containerID="38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.886278 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77"} err="failed to get container status \"38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77\": rpc error: code = NotFound desc = could not find container \"38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77\": container with ID starting with 38f76627fb3056c30815512369cdf4ba9777cc5e7c7522db9b57766e1862fa77 not found: ID does not exist" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.886310 5010 scope.go:117] "RemoveContainer" containerID="af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846" Nov 26 17:32:16 crc kubenswrapper[5010]: E1126 17:32:16.886541 5010 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846\": container with ID starting with af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846 not found: ID does not exist" containerID="af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846" Nov 26 17:32:16 crc kubenswrapper[5010]: I1126 17:32:16.886562 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846"} err="failed to get container status \"af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846\": rpc error: code = NotFound desc = could not find container \"af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846\": container with ID starting with af0c67d1bf00c1fa37880fc3147ce36f022b6a96a014412265befac5471df846 not found: ID does not exist" Nov 26 17:32:17 crc kubenswrapper[5010]: I1126 17:32:17.907842 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" path="/var/lib/kubelet/pods/f0241b04-bc4b-4d79-844c-c9a79f5325f5/volumes" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.138788 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-j2qxs"] Nov 26 17:32:28 crc kubenswrapper[5010]: E1126 17:32:28.140144 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="registry-server" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.140165 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="registry-server" Nov 26 17:32:28 crc kubenswrapper[5010]: E1126 17:32:28.140197 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="extract-utilities" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.140208 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="extract-utilities" Nov 26 17:32:28 crc kubenswrapper[5010]: E1126 17:32:28.140256 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="484ffc98-a27e-4fc3-9fb9-70c960bd0699" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.140271 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="484ffc98-a27e-4fc3-9fb9-70c960bd0699" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 26 17:32:28 crc kubenswrapper[5010]: E1126 17:32:28.140484 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="extract-content" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.140500 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="extract-content" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.140942 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0241b04-bc4b-4d79-844c-c9a79f5325f5" containerName="registry-server" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.141010 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="484ffc98-a27e-4fc3-9fb9-70c960bd0699" containerName="tripleo-cleanup-tripleo-cleanup-openstack-cell1" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.142111 5010 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.146567 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.146855 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.147156 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.150536 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.158943 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-j2qxs"] Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.292175 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-inventory\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.292236 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.292289 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmw2q\" (UniqueName: \"kubernetes.io/projected/96fc8d0a-7889-436f-95bd-2e6d59921db3-kube-api-access-cmw2q\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.292495 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.394077 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-inventory\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.394118 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " 
pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.394148 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmw2q\" (UniqueName: \"kubernetes.io/projected/96fc8d0a-7889-436f-95bd-2e6d59921db3-kube-api-access-cmw2q\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.394279 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.404405 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-ssh-key\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.404405 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-inventory\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.405310 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-bootstrap-combined-ca-bundle\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.413538 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmw2q\" (UniqueName: \"kubernetes.io/projected/96fc8d0a-7889-436f-95bd-2e6d59921db3-kube-api-access-cmw2q\") pod \"bootstrap-openstack-openstack-cell1-j2qxs\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:28 crc kubenswrapper[5010]: I1126 17:32:28.482509 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:32:29 crc kubenswrapper[5010]: I1126 17:32:29.232194 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-openstack-openstack-cell1-j2qxs"] Nov 26 17:32:29 crc kubenswrapper[5010]: I1126 17:32:29.969692 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" event={"ID":"96fc8d0a-7889-436f-95bd-2e6d59921db3","Type":"ContainerStarted","Data":"8708cd85367c88ced689a9dad8c667c75e87b7f26c09019f78b3b89183af9a3b"} Nov 26 17:32:30 crc kubenswrapper[5010]: I1126 17:32:30.983382 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" event={"ID":"96fc8d0a-7889-436f-95bd-2e6d59921db3","Type":"ContainerStarted","Data":"47577565cb76f4635a13e5ec00c8da68f912231b1fb95196a9ecc32d69ddbefe"} Nov 26 17:32:31 crc kubenswrapper[5010]: I1126 17:32:31.007624 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" podStartSLOduration=2.5725206800000002 podStartE2EDuration="3.00760227s" podCreationTimestamp="2025-11-26 17:32:28 +0000 UTC" firstStartedPulling="2025-11-26 17:32:29.236318235 +0000 UTC m=+7570.027035383" lastFinishedPulling="2025-11-26 17:32:29.671399825 +0000 UTC m=+7570.462116973" observedRunningTime="2025-11-26 17:32:30.999737228 +0000 UTC m=+7571.790454386" watchObservedRunningTime="2025-11-26 17:32:31.00760227 +0000 UTC m=+7571.798319418" Nov 26 17:34:41 crc kubenswrapper[5010]: I1126 17:34:41.423042 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:34:41 crc kubenswrapper[5010]: I1126 17:34:41.423524 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:35:11 crc kubenswrapper[5010]: I1126 17:35:11.422600 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:35:11 crc kubenswrapper[5010]: I1126 17:35:11.423584 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:35:38 crc kubenswrapper[5010]: I1126 17:35:38.992783 5010 generic.go:334] "Generic (PLEG): container finished" podID="96fc8d0a-7889-436f-95bd-2e6d59921db3" containerID="47577565cb76f4635a13e5ec00c8da68f912231b1fb95196a9ecc32d69ddbefe" exitCode=0 Nov 26 17:35:38 crc kubenswrapper[5010]: I1126 17:35:38.992916 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" 
event={"ID":"96fc8d0a-7889-436f-95bd-2e6d59921db3","Type":"ContainerDied","Data":"47577565cb76f4635a13e5ec00c8da68f912231b1fb95196a9ecc32d69ddbefe"} Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.482151 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.588741 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-inventory\") pod \"96fc8d0a-7889-436f-95bd-2e6d59921db3\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.588873 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-bootstrap-combined-ca-bundle\") pod \"96fc8d0a-7889-436f-95bd-2e6d59921db3\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.588967 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmw2q\" (UniqueName: \"kubernetes.io/projected/96fc8d0a-7889-436f-95bd-2e6d59921db3-kube-api-access-cmw2q\") pod \"96fc8d0a-7889-436f-95bd-2e6d59921db3\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.589013 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-ssh-key\") pod \"96fc8d0a-7889-436f-95bd-2e6d59921db3\" (UID: \"96fc8d0a-7889-436f-95bd-2e6d59921db3\") " Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.595533 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "96fc8d0a-7889-436f-95bd-2e6d59921db3" (UID: "96fc8d0a-7889-436f-95bd-2e6d59921db3"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.596416 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96fc8d0a-7889-436f-95bd-2e6d59921db3-kube-api-access-cmw2q" (OuterVolumeSpecName: "kube-api-access-cmw2q") pod "96fc8d0a-7889-436f-95bd-2e6d59921db3" (UID: "96fc8d0a-7889-436f-95bd-2e6d59921db3"). InnerVolumeSpecName "kube-api-access-cmw2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.619098 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "96fc8d0a-7889-436f-95bd-2e6d59921db3" (UID: "96fc8d0a-7889-436f-95bd-2e6d59921db3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.633263 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-inventory" (OuterVolumeSpecName: "inventory") pod "96fc8d0a-7889-436f-95bd-2e6d59921db3" (UID: "96fc8d0a-7889-436f-95bd-2e6d59921db3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.692072 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.692199 5010 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.692257 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmw2q\" (UniqueName: \"kubernetes.io/projected/96fc8d0a-7889-436f-95bd-2e6d59921db3-kube-api-access-cmw2q\") on node \"crc\" DevicePath \"\"" Nov 26 17:35:40 crc kubenswrapper[5010]: I1126 17:35:40.692309 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/96fc8d0a-7889-436f-95bd-2e6d59921db3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.017066 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" event={"ID":"96fc8d0a-7889-436f-95bd-2e6d59921db3","Type":"ContainerDied","Data":"8708cd85367c88ced689a9dad8c667c75e87b7f26c09019f78b3b89183af9a3b"} Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.017442 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8708cd85367c88ced689a9dad8c667c75e87b7f26c09019f78b3b89183af9a3b" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.017161 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-openstack-openstack-cell1-j2qxs" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.097176 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-8t624"] Nov 26 17:35:41 crc kubenswrapper[5010]: E1126 17:35:41.115046 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96fc8d0a-7889-436f-95bd-2e6d59921db3" containerName="bootstrap-openstack-openstack-cell1" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.115101 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96fc8d0a-7889-436f-95bd-2e6d59921db3" containerName="bootstrap-openstack-openstack-cell1" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.115814 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="96fc8d0a-7889-436f-95bd-2e6d59921db3" containerName="bootstrap-openstack-openstack-cell1" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.123953 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.129562 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.129851 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.130595 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.130964 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.161136 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-8t624"] Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.310802 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwjnk\" (UniqueName: \"kubernetes.io/projected/b09df012-4d2d-418a-8b1b-79247ce409f3-kube-api-access-rwjnk\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.311309 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-inventory\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.311865 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-ssh-key\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.415424 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-inventory\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.415536 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-ssh-key\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.415598 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwjnk\" (UniqueName: \"kubernetes.io/projected/b09df012-4d2d-418a-8b1b-79247ce409f3-kube-api-access-rwjnk\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 
17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.422985 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.423050 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.423096 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.423859 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-inventory\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.423949 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.424003 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" gracePeriod=600 Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.424685 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-ssh-key\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 17:35:41.451343 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwjnk\" (UniqueName: \"kubernetes.io/projected/b09df012-4d2d-418a-8b1b-79247ce409f3-kube-api-access-rwjnk\") pod \"download-cache-openstack-openstack-cell1-8t624\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:41 crc kubenswrapper[5010]: E1126 17:35:41.571020 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:35:41 crc kubenswrapper[5010]: I1126 
17:35:41.750416 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:35:42 crc kubenswrapper[5010]: I1126 17:35:42.027159 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" exitCode=0 Nov 26 17:35:42 crc kubenswrapper[5010]: I1126 17:35:42.027946 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec"} Nov 26 17:35:42 crc kubenswrapper[5010]: I1126 17:35:42.027990 5010 scope.go:117] "RemoveContainer" containerID="7db55c6c053150d443cfe1b3eed0acf45eccd3a1c48db253581be4c168619c6b" Nov 26 17:35:42 crc kubenswrapper[5010]: I1126 17:35:42.029464 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:35:42 crc kubenswrapper[5010]: E1126 17:35:42.029931 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:35:42 crc kubenswrapper[5010]: I1126 17:35:42.360599 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-openstack-openstack-cell1-8t624"] Nov 26 17:35:42 crc kubenswrapper[5010]: I1126 17:35:42.380017 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:35:43 crc kubenswrapper[5010]: I1126 17:35:43.042089 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8t624" event={"ID":"b09df012-4d2d-418a-8b1b-79247ce409f3","Type":"ContainerStarted","Data":"1995e917867fbf541010a84e334918379000aaaccea15a42fdc04de4b5f8ca89"} Nov 26 17:35:44 crc kubenswrapper[5010]: I1126 17:35:44.052673 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8t624" event={"ID":"b09df012-4d2d-418a-8b1b-79247ce409f3","Type":"ContainerStarted","Data":"6eeb5e341b2553cda19f5b9031db20ae65b6af3dca88ed721ea29edbee5374c2"} Nov 26 17:35:44 crc kubenswrapper[5010]: I1126 17:35:44.081040 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-openstack-openstack-cell1-8t624" podStartSLOduration=2.070487404 podStartE2EDuration="3.081013808s" podCreationTimestamp="2025-11-26 17:35:41 +0000 UTC" firstStartedPulling="2025-11-26 17:35:42.379822633 +0000 UTC m=+7763.170539781" lastFinishedPulling="2025-11-26 17:35:43.390348997 +0000 UTC m=+7764.181066185" observedRunningTime="2025-11-26 17:35:44.077551912 +0000 UTC m=+7764.868269100" watchObservedRunningTime="2025-11-26 17:35:44.081013808 +0000 UTC m=+7764.871730956" Nov 26 17:35:54 crc kubenswrapper[5010]: I1126 17:35:54.893135 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:35:54 crc kubenswrapper[5010]: E1126 17:35:54.894392 5010 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:36:08 crc kubenswrapper[5010]: I1126 17:36:08.899409 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:36:08 crc kubenswrapper[5010]: E1126 17:36:08.901026 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:36:19 crc kubenswrapper[5010]: I1126 17:36:19.905185 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:36:19 crc kubenswrapper[5010]: E1126 17:36:19.906373 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:36:32 crc kubenswrapper[5010]: I1126 17:36:32.891848 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:36:32 crc kubenswrapper[5010]: E1126 17:36:32.893132 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:36:46 crc kubenswrapper[5010]: I1126 17:36:46.892504 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:36:46 crc kubenswrapper[5010]: E1126 17:36:46.893398 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:37:00 crc kubenswrapper[5010]: I1126 17:37:00.893006 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:37:00 crc kubenswrapper[5010]: E1126 17:37:00.893900 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:37:12 crc kubenswrapper[5010]: I1126 17:37:12.892011 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:37:12 crc kubenswrapper[5010]: E1126 17:37:12.892827 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:37:16 crc kubenswrapper[5010]: I1126 17:37:16.140137 5010 generic.go:334] "Generic (PLEG): container finished" podID="b09df012-4d2d-418a-8b1b-79247ce409f3" containerID="6eeb5e341b2553cda19f5b9031db20ae65b6af3dca88ed721ea29edbee5374c2" exitCode=0 Nov 26 17:37:16 crc kubenswrapper[5010]: I1126 17:37:16.140303 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8t624" event={"ID":"b09df012-4d2d-418a-8b1b-79247ce409f3","Type":"ContainerDied","Data":"6eeb5e341b2553cda19f5b9031db20ae65b6af3dca88ed721ea29edbee5374c2"} Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.665517 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.722257 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwjnk\" (UniqueName: \"kubernetes.io/projected/b09df012-4d2d-418a-8b1b-79247ce409f3-kube-api-access-rwjnk\") pod \"b09df012-4d2d-418a-8b1b-79247ce409f3\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.722306 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-ssh-key\") pod \"b09df012-4d2d-418a-8b1b-79247ce409f3\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.722521 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-inventory\") pod \"b09df012-4d2d-418a-8b1b-79247ce409f3\" (UID: \"b09df012-4d2d-418a-8b1b-79247ce409f3\") " Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.731618 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b09df012-4d2d-418a-8b1b-79247ce409f3-kube-api-access-rwjnk" (OuterVolumeSpecName: "kube-api-access-rwjnk") pod "b09df012-4d2d-418a-8b1b-79247ce409f3" (UID: "b09df012-4d2d-418a-8b1b-79247ce409f3"). InnerVolumeSpecName "kube-api-access-rwjnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.756588 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-inventory" (OuterVolumeSpecName: "inventory") pod "b09df012-4d2d-418a-8b1b-79247ce409f3" (UID: "b09df012-4d2d-418a-8b1b-79247ce409f3"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.757160 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b09df012-4d2d-418a-8b1b-79247ce409f3" (UID: "b09df012-4d2d-418a-8b1b-79247ce409f3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.825139 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.825172 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwjnk\" (UniqueName: \"kubernetes.io/projected/b09df012-4d2d-418a-8b1b-79247ce409f3-kube-api-access-rwjnk\") on node \"crc\" DevicePath \"\"" Nov 26 17:37:17 crc kubenswrapper[5010]: I1126 17:37:17.825181 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b09df012-4d2d-418a-8b1b-79247ce409f3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.175974 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-openstack-openstack-cell1-8t624" event={"ID":"b09df012-4d2d-418a-8b1b-79247ce409f3","Type":"ContainerDied","Data":"1995e917867fbf541010a84e334918379000aaaccea15a42fdc04de4b5f8ca89"} Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.176359 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1995e917867fbf541010a84e334918379000aaaccea15a42fdc04de4b5f8ca89" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.178444 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-openstack-openstack-cell1-8t624" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.269470 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-9w5cw"] Nov 26 17:37:18 crc kubenswrapper[5010]: E1126 17:37:18.270320 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b09df012-4d2d-418a-8b1b-79247ce409f3" containerName="download-cache-openstack-openstack-cell1" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.270344 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b09df012-4d2d-418a-8b1b-79247ce409f3" containerName="download-cache-openstack-openstack-cell1" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.270699 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b09df012-4d2d-418a-8b1b-79247ce409f3" containerName="download-cache-openstack-openstack-cell1" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.271701 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.280019 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.280543 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.280845 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.281164 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.298153 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-9w5cw"] Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.335421 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-ssh-key\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.335565 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26ntf\" (UniqueName: \"kubernetes.io/projected/e4c8ec39-07f8-45d9-b135-175e573d1530-kube-api-access-26ntf\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.335606 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-inventory\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.437232 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26ntf\" (UniqueName: \"kubernetes.io/projected/e4c8ec39-07f8-45d9-b135-175e573d1530-kube-api-access-26ntf\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.437519 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-inventory\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.437741 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-ssh-key\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " 
pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.441494 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-ssh-key\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.445501 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-inventory\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.456837 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26ntf\" (UniqueName: \"kubernetes.io/projected/e4c8ec39-07f8-45d9-b135-175e573d1530-kube-api-access-26ntf\") pod \"configure-network-openstack-openstack-cell1-9w5cw\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:18 crc kubenswrapper[5010]: I1126 17:37:18.600030 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:37:19 crc kubenswrapper[5010]: I1126 17:37:19.181903 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-openstack-openstack-cell1-9w5cw"] Nov 26 17:37:19 crc kubenswrapper[5010]: I1126 17:37:19.992901 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:37:20 crc kubenswrapper[5010]: I1126 17:37:20.196149 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" event={"ID":"e4c8ec39-07f8-45d9-b135-175e573d1530","Type":"ContainerStarted","Data":"7d9373110ae0425be46d203bea277bdc470262526238a6e5a79431196c2d6731"} Nov 26 17:37:21 crc kubenswrapper[5010]: I1126 17:37:21.208529 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" event={"ID":"e4c8ec39-07f8-45d9-b135-175e573d1530","Type":"ContainerStarted","Data":"dc2e8c998d90224e279e2f3427d73dba3d13bc1f9c4701614a8f67c4a7a862e8"} Nov 26 17:37:21 crc kubenswrapper[5010]: I1126 17:37:21.232927 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" podStartSLOduration=2.439312104 podStartE2EDuration="3.232908701s" podCreationTimestamp="2025-11-26 17:37:18 +0000 UTC" firstStartedPulling="2025-11-26 17:37:19.196423549 +0000 UTC m=+7859.987140697" lastFinishedPulling="2025-11-26 17:37:19.990020146 +0000 UTC m=+7860.780737294" observedRunningTime="2025-11-26 17:37:21.224962173 +0000 UTC m=+7862.015679331" watchObservedRunningTime="2025-11-26 17:37:21.232908701 +0000 UTC m=+7862.023625849" Nov 26 17:37:26 crc kubenswrapper[5010]: I1126 17:37:26.892738 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:37:26 crc kubenswrapper[5010]: E1126 17:37:26.893467 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:37:39 crc kubenswrapper[5010]: I1126 17:37:39.898901 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:37:39 crc kubenswrapper[5010]: E1126 17:37:39.899852 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:37:53 crc kubenswrapper[5010]: I1126 17:37:53.892265 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:37:53 crc kubenswrapper[5010]: E1126 17:37:53.893938 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:38:08 crc kubenswrapper[5010]: I1126 17:38:08.892557 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:38:08 crc kubenswrapper[5010]: E1126 17:38:08.895686 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:38:21 crc kubenswrapper[5010]: I1126 17:38:21.891859 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:38:21 crc kubenswrapper[5010]: E1126 17:38:21.894114 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:38:33 crc kubenswrapper[5010]: I1126 17:38:33.892056 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:38:33 crc kubenswrapper[5010]: E1126 17:38:33.892900 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:38:38 crc kubenswrapper[5010]: I1126 17:38:38.400500 5010 generic.go:334] "Generic (PLEG): container finished" podID="e4c8ec39-07f8-45d9-b135-175e573d1530" containerID="dc2e8c998d90224e279e2f3427d73dba3d13bc1f9c4701614a8f67c4a7a862e8" exitCode=0 Nov 26 17:38:38 crc kubenswrapper[5010]: I1126 17:38:38.400592 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" event={"ID":"e4c8ec39-07f8-45d9-b135-175e573d1530","Type":"ContainerDied","Data":"dc2e8c998d90224e279e2f3427d73dba3d13bc1f9c4701614a8f67c4a7a862e8"} Nov 26 17:38:39 crc kubenswrapper[5010]: I1126 17:38:39.901327 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.062363 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26ntf\" (UniqueName: \"kubernetes.io/projected/e4c8ec39-07f8-45d9-b135-175e573d1530-kube-api-access-26ntf\") pod \"e4c8ec39-07f8-45d9-b135-175e573d1530\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.062521 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-ssh-key\") pod \"e4c8ec39-07f8-45d9-b135-175e573d1530\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.062647 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-inventory\") pod \"e4c8ec39-07f8-45d9-b135-175e573d1530\" (UID: \"e4c8ec39-07f8-45d9-b135-175e573d1530\") " Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.067877 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e4c8ec39-07f8-45d9-b135-175e573d1530-kube-api-access-26ntf" (OuterVolumeSpecName: "kube-api-access-26ntf") pod "e4c8ec39-07f8-45d9-b135-175e573d1530" (UID: "e4c8ec39-07f8-45d9-b135-175e573d1530"). InnerVolumeSpecName "kube-api-access-26ntf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.093439 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-inventory" (OuterVolumeSpecName: "inventory") pod "e4c8ec39-07f8-45d9-b135-175e573d1530" (UID: "e4c8ec39-07f8-45d9-b135-175e573d1530"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.094136 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e4c8ec39-07f8-45d9-b135-175e573d1530" (UID: "e4c8ec39-07f8-45d9-b135-175e573d1530"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.165017 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26ntf\" (UniqueName: \"kubernetes.io/projected/e4c8ec39-07f8-45d9-b135-175e573d1530-kube-api-access-26ntf\") on node \"crc\" DevicePath \"\"" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.165225 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.165283 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e4c8ec39-07f8-45d9-b135-175e573d1530-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.424585 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" event={"ID":"e4c8ec39-07f8-45d9-b135-175e573d1530","Type":"ContainerDied","Data":"7d9373110ae0425be46d203bea277bdc470262526238a6e5a79431196c2d6731"} Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.424637 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-openstack-openstack-cell1-9w5cw" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.424642 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7d9373110ae0425be46d203bea277bdc470262526238a6e5a79431196c2d6731" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.556445 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-rrchd"] Nov 26 17:38:40 crc kubenswrapper[5010]: E1126 17:38:40.556987 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e4c8ec39-07f8-45d9-b135-175e573d1530" containerName="configure-network-openstack-openstack-cell1" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.557016 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e4c8ec39-07f8-45d9-b135-175e573d1530" containerName="configure-network-openstack-openstack-cell1" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.557303 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e4c8ec39-07f8-45d9-b135-175e573d1530" containerName="configure-network-openstack-openstack-cell1" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.558257 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.560954 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.562419 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.563452 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.572806 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.574107 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-rrchd"] Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.676162 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-ssh-key\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.676211 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mwfkg\" (UniqueName: \"kubernetes.io/projected/0decd137-74df-4dea-81f5-4b5431d96871-kube-api-access-mwfkg\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.676287 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-inventory\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.778306 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-ssh-key\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.778352 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mwfkg\" (UniqueName: \"kubernetes.io/projected/0decd137-74df-4dea-81f5-4b5431d96871-kube-api-access-mwfkg\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.778415 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-inventory\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " 
pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.783754 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-ssh-key\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.783835 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-inventory\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.801034 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mwfkg\" (UniqueName: \"kubernetes.io/projected/0decd137-74df-4dea-81f5-4b5431d96871-kube-api-access-mwfkg\") pod \"validate-network-openstack-openstack-cell1-rrchd\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:40 crc kubenswrapper[5010]: I1126 17:38:40.878634 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:41 crc kubenswrapper[5010]: I1126 17:38:41.424041 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-openstack-openstack-cell1-rrchd"] Nov 26 17:38:41 crc kubenswrapper[5010]: I1126 17:38:41.437976 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" event={"ID":"0decd137-74df-4dea-81f5-4b5431d96871","Type":"ContainerStarted","Data":"5f2df761db94f2710aa65b174bf18039aa086150eb93b77b8c2f956e52770539"} Nov 26 17:38:42 crc kubenswrapper[5010]: I1126 17:38:42.449341 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" event={"ID":"0decd137-74df-4dea-81f5-4b5431d96871","Type":"ContainerStarted","Data":"4a349193e9a9748e4a5e9cf270040f3d963cf59094442749b53e1e7676b0c0d6"} Nov 26 17:38:42 crc kubenswrapper[5010]: I1126 17:38:42.480667 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" podStartSLOduration=1.95115777 podStartE2EDuration="2.480645199s" podCreationTimestamp="2025-11-26 17:38:40 +0000 UTC" firstStartedPulling="2025-11-26 17:38:41.429811504 +0000 UTC m=+7942.220528692" lastFinishedPulling="2025-11-26 17:38:41.959298963 +0000 UTC m=+7942.750016121" observedRunningTime="2025-11-26 17:38:42.471102222 +0000 UTC m=+7943.261819370" watchObservedRunningTime="2025-11-26 17:38:42.480645199 +0000 UTC m=+7943.271362357" Nov 26 17:38:47 crc kubenswrapper[5010]: I1126 17:38:47.507682 5010 generic.go:334] "Generic (PLEG): container finished" podID="0decd137-74df-4dea-81f5-4b5431d96871" containerID="4a349193e9a9748e4a5e9cf270040f3d963cf59094442749b53e1e7676b0c0d6" exitCode=0 Nov 26 17:38:47 crc kubenswrapper[5010]: I1126 17:38:47.507782 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" 
event={"ID":"0decd137-74df-4dea-81f5-4b5431d96871","Type":"ContainerDied","Data":"4a349193e9a9748e4a5e9cf270040f3d963cf59094442749b53e1e7676b0c0d6"} Nov 26 17:38:47 crc kubenswrapper[5010]: I1126 17:38:47.892438 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:38:47 crc kubenswrapper[5010]: E1126 17:38:47.893047 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.035933 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.162151 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-ssh-key\") pod \"0decd137-74df-4dea-81f5-4b5431d96871\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.162233 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-inventory\") pod \"0decd137-74df-4dea-81f5-4b5431d96871\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.162340 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mwfkg\" (UniqueName: \"kubernetes.io/projected/0decd137-74df-4dea-81f5-4b5431d96871-kube-api-access-mwfkg\") pod \"0decd137-74df-4dea-81f5-4b5431d96871\" (UID: \"0decd137-74df-4dea-81f5-4b5431d96871\") " Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.171103 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0decd137-74df-4dea-81f5-4b5431d96871-kube-api-access-mwfkg" (OuterVolumeSpecName: "kube-api-access-mwfkg") pod "0decd137-74df-4dea-81f5-4b5431d96871" (UID: "0decd137-74df-4dea-81f5-4b5431d96871"). InnerVolumeSpecName "kube-api-access-mwfkg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.199516 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-inventory" (OuterVolumeSpecName: "inventory") pod "0decd137-74df-4dea-81f5-4b5431d96871" (UID: "0decd137-74df-4dea-81f5-4b5431d96871"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.211115 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0decd137-74df-4dea-81f5-4b5431d96871" (UID: "0decd137-74df-4dea-81f5-4b5431d96871"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.265109 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mwfkg\" (UniqueName: \"kubernetes.io/projected/0decd137-74df-4dea-81f5-4b5431d96871-kube-api-access-mwfkg\") on node \"crc\" DevicePath \"\"" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.265159 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.265172 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0decd137-74df-4dea-81f5-4b5431d96871-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.529873 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" event={"ID":"0decd137-74df-4dea-81f5-4b5431d96871","Type":"ContainerDied","Data":"5f2df761db94f2710aa65b174bf18039aa086150eb93b77b8c2f956e52770539"} Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.529920 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5f2df761db94f2710aa65b174bf18039aa086150eb93b77b8c2f956e52770539" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.529992 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-openstack-openstack-cell1-rrchd" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.597309 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-openstack-openstack-cell1-dgxbz"] Nov 26 17:38:49 crc kubenswrapper[5010]: E1126 17:38:49.597771 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0decd137-74df-4dea-81f5-4b5431d96871" containerName="validate-network-openstack-openstack-cell1" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.597787 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0decd137-74df-4dea-81f5-4b5431d96871" containerName="validate-network-openstack-openstack-cell1" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.598333 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0decd137-74df-4dea-81f5-4b5431d96871" containerName="validate-network-openstack-openstack-cell1" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.599043 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.601736 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.602216 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.602572 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.605139 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.612132 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-dgxbz"] Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.674075 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-ssh-key\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.674383 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-inventory\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.674742 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6sd9d\" (UniqueName: \"kubernetes.io/projected/16688d62-61dc-4d17-9540-35697c945721-kube-api-access-6sd9d\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.777333 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6sd9d\" (UniqueName: \"kubernetes.io/projected/16688d62-61dc-4d17-9540-35697c945721-kube-api-access-6sd9d\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.777789 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-ssh-key\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.777915 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-inventory\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.783018 5010 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-ssh-key\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.783445 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-inventory\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.795286 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6sd9d\" (UniqueName: \"kubernetes.io/projected/16688d62-61dc-4d17-9540-35697c945721-kube-api-access-6sd9d\") pod \"install-os-openstack-openstack-cell1-dgxbz\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:49 crc kubenswrapper[5010]: I1126 17:38:49.933268 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:38:50 crc kubenswrapper[5010]: I1126 17:38:50.483206 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-openstack-openstack-cell1-dgxbz"] Nov 26 17:38:50 crc kubenswrapper[5010]: I1126 17:38:50.537832 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" event={"ID":"16688d62-61dc-4d17-9540-35697c945721","Type":"ContainerStarted","Data":"cb7196a1b215607268debd98b4d237ea5404a062010656a4b617934587bc7124"} Nov 26 17:38:51 crc kubenswrapper[5010]: I1126 17:38:51.549053 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" event={"ID":"16688d62-61dc-4d17-9540-35697c945721","Type":"ContainerStarted","Data":"b7dbc29af8e3fd41c33bb1094d7ba445451ecb7895f22297e4792f42be2bd76d"} Nov 26 17:38:51 crc kubenswrapper[5010]: I1126 17:38:51.577426 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" podStartSLOduration=2.067916291 podStartE2EDuration="2.577399953s" podCreationTimestamp="2025-11-26 17:38:49 +0000 UTC" firstStartedPulling="2025-11-26 17:38:50.487767845 +0000 UTC m=+7951.278484993" lastFinishedPulling="2025-11-26 17:38:50.997251497 +0000 UTC m=+7951.787968655" observedRunningTime="2025-11-26 17:38:51.56396853 +0000 UTC m=+7952.354685718" watchObservedRunningTime="2025-11-26 17:38:51.577399953 +0000 UTC m=+7952.368117131" Nov 26 17:38:58 crc kubenswrapper[5010]: I1126 17:38:58.891588 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:38:58 crc kubenswrapper[5010]: E1126 17:38:58.892826 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:39:13 crc kubenswrapper[5010]: I1126 17:39:13.893798 
5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:39:13 crc kubenswrapper[5010]: E1126 17:39:13.896607 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:39:24 crc kubenswrapper[5010]: I1126 17:39:24.891891 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:39:24 crc kubenswrapper[5010]: E1126 17:39:24.892927 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:39:35 crc kubenswrapper[5010]: I1126 17:39:35.892245 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:39:35 crc kubenswrapper[5010]: E1126 17:39:35.893860 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:39:37 crc kubenswrapper[5010]: I1126 17:39:37.076253 5010 generic.go:334] "Generic (PLEG): container finished" podID="16688d62-61dc-4d17-9540-35697c945721" containerID="b7dbc29af8e3fd41c33bb1094d7ba445451ecb7895f22297e4792f42be2bd76d" exitCode=0 Nov 26 17:39:37 crc kubenswrapper[5010]: I1126 17:39:37.076346 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" event={"ID":"16688d62-61dc-4d17-9540-35697c945721","Type":"ContainerDied","Data":"b7dbc29af8e3fd41c33bb1094d7ba445451ecb7895f22297e4792f42be2bd76d"} Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.699359 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.773100 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-ssh-key\") pod \"16688d62-61dc-4d17-9540-35697c945721\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.773261 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6sd9d\" (UniqueName: \"kubernetes.io/projected/16688d62-61dc-4d17-9540-35697c945721-kube-api-access-6sd9d\") pod \"16688d62-61dc-4d17-9540-35697c945721\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.773280 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-inventory\") pod \"16688d62-61dc-4d17-9540-35697c945721\" (UID: \"16688d62-61dc-4d17-9540-35697c945721\") " Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.788570 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16688d62-61dc-4d17-9540-35697c945721-kube-api-access-6sd9d" (OuterVolumeSpecName: "kube-api-access-6sd9d") pod "16688d62-61dc-4d17-9540-35697c945721" (UID: "16688d62-61dc-4d17-9540-35697c945721"). InnerVolumeSpecName "kube-api-access-6sd9d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.806836 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "16688d62-61dc-4d17-9540-35697c945721" (UID: "16688d62-61dc-4d17-9540-35697c945721"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.808721 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-inventory" (OuterVolumeSpecName: "inventory") pod "16688d62-61dc-4d17-9540-35697c945721" (UID: "16688d62-61dc-4d17-9540-35697c945721"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.875931 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.875981 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6sd9d\" (UniqueName: \"kubernetes.io/projected/16688d62-61dc-4d17-9540-35697c945721-kube-api-access-6sd9d\") on node \"crc\" DevicePath \"\"" Nov 26 17:39:38 crc kubenswrapper[5010]: I1126 17:39:38.875999 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/16688d62-61dc-4d17-9540-35697c945721-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.118828 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" event={"ID":"16688d62-61dc-4d17-9540-35697c945721","Type":"ContainerDied","Data":"cb7196a1b215607268debd98b4d237ea5404a062010656a4b617934587bc7124"} Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.118888 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cb7196a1b215607268debd98b4d237ea5404a062010656a4b617934587bc7124" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.119366 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-openstack-openstack-cell1-dgxbz" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.200225 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-fxj9x"] Nov 26 17:39:39 crc kubenswrapper[5010]: E1126 17:39:39.200867 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16688d62-61dc-4d17-9540-35697c945721" containerName="install-os-openstack-openstack-cell1" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.200892 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="16688d62-61dc-4d17-9540-35697c945721" containerName="install-os-openstack-openstack-cell1" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.201198 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="16688d62-61dc-4d17-9540-35697c945721" containerName="install-os-openstack-openstack-cell1" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.202391 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.208027 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.208209 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.208457 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.212557 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.212968 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-fxj9x"] Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.285004 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6t8v\" (UniqueName: \"kubernetes.io/projected/d007a94e-fb49-436e-b5ca-ae0c5e791540-kube-api-access-n6t8v\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.285061 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-ssh-key\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.285168 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-inventory\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.387323 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-inventory\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.387633 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6t8v\" (UniqueName: \"kubernetes.io/projected/d007a94e-fb49-436e-b5ca-ae0c5e791540-kube-api-access-n6t8v\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.387684 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-ssh-key\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: 
I1126 17:39:39.393442 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-inventory\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.393777 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-ssh-key\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.405121 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6t8v\" (UniqueName: \"kubernetes.io/projected/d007a94e-fb49-436e-b5ca-ae0c5e791540-kube-api-access-n6t8v\") pod \"configure-os-openstack-openstack-cell1-fxj9x\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:39 crc kubenswrapper[5010]: I1126 17:39:39.523356 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:39:40 crc kubenswrapper[5010]: I1126 17:39:40.194451 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-openstack-openstack-cell1-fxj9x"] Nov 26 17:39:41 crc kubenswrapper[5010]: I1126 17:39:41.147295 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" event={"ID":"d007a94e-fb49-436e-b5ca-ae0c5e791540","Type":"ContainerStarted","Data":"d044b06e3e79c6ee885ad9b1fa1ffacf51b3dbcd492ac10ded88db0b05dd8d59"} Nov 26 17:39:41 crc kubenswrapper[5010]: I1126 17:39:41.148120 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" event={"ID":"d007a94e-fb49-436e-b5ca-ae0c5e791540","Type":"ContainerStarted","Data":"0a83cc5f2ebc70b0b2775f3b42b38fcb79b5134d400c137a08f58dd128578af9"} Nov 26 17:39:41 crc kubenswrapper[5010]: I1126 17:39:41.185940 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" podStartSLOduration=1.645847966 podStartE2EDuration="2.185910408s" podCreationTimestamp="2025-11-26 17:39:39 +0000 UTC" firstStartedPulling="2025-11-26 17:39:40.201958523 +0000 UTC m=+8000.992675671" lastFinishedPulling="2025-11-26 17:39:40.742020955 +0000 UTC m=+8001.532738113" observedRunningTime="2025-11-26 17:39:41.175593511 +0000 UTC m=+8001.966310679" watchObservedRunningTime="2025-11-26 17:39:41.185910408 +0000 UTC m=+8001.976627576" Nov 26 17:39:48 crc kubenswrapper[5010]: I1126 17:39:48.891559 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:39:48 crc kubenswrapper[5010]: E1126 17:39:48.892677 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:40:03 
crc kubenswrapper[5010]: I1126 17:40:03.891563 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:40:03 crc kubenswrapper[5010]: E1126 17:40:03.892232 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:40:14 crc kubenswrapper[5010]: I1126 17:40:14.892269 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:40:14 crc kubenswrapper[5010]: E1126 17:40:14.893530 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.297363 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6qq4v"] Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.328078 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.329416 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6qq4v"] Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.484006 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-utilities\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.484098 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nl2tp\" (UniqueName: \"kubernetes.io/projected/2f9c3b7c-c68c-46d9-bb79-6843a982827f-kube-api-access-nl2tp\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.484335 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-catalog-content\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.499212 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7xmsf"] Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.501658 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.513471 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7xmsf"] Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.585830 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-catalog-content\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.585975 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-utilities\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.586006 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nl2tp\" (UniqueName: \"kubernetes.io/projected/2f9c3b7c-c68c-46d9-bb79-6843a982827f-kube-api-access-nl2tp\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.586830 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-utilities\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.588656 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-catalog-content\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.610243 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nl2tp\" (UniqueName: \"kubernetes.io/projected/2f9c3b7c-c68c-46d9-bb79-6843a982827f-kube-api-access-nl2tp\") pod \"certified-operators-6qq4v\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.657353 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.687951 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-utilities\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.688097 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgvv2\" (UniqueName: \"kubernetes.io/projected/53a81e07-adc4-4915-a1f1-871188924729-kube-api-access-mgvv2\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.688241 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-catalog-content\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.791955 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-utilities\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.792254 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgvv2\" (UniqueName: \"kubernetes.io/projected/53a81e07-adc4-4915-a1f1-871188924729-kube-api-access-mgvv2\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.792354 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-catalog-content\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.792721 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-catalog-content\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.794638 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-utilities\") pod \"redhat-operators-7xmsf\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.826431 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgvv2\" (UniqueName: \"kubernetes.io/projected/53a81e07-adc4-4915-a1f1-871188924729-kube-api-access-mgvv2\") pod \"redhat-operators-7xmsf\" (UID: 
\"53a81e07-adc4-4915-a1f1-871188924729\") " pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:24 crc kubenswrapper[5010]: I1126 17:40:24.841735 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:25 crc kubenswrapper[5010]: I1126 17:40:25.246073 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6qq4v"] Nov 26 17:40:25 crc kubenswrapper[5010]: I1126 17:40:25.520074 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7xmsf"] Nov 26 17:40:25 crc kubenswrapper[5010]: W1126 17:40:25.543254 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod53a81e07_adc4_4915_a1f1_871188924729.slice/crio-d524f25bcbe989bad1e6621dbaadd05dee093db288bfe1ef78f65ab19d34375c WatchSource:0}: Error finding container d524f25bcbe989bad1e6621dbaadd05dee093db288bfe1ef78f65ab19d34375c: Status 404 returned error can't find the container with id d524f25bcbe989bad1e6621dbaadd05dee093db288bfe1ef78f65ab19d34375c Nov 26 17:40:25 crc kubenswrapper[5010]: I1126 17:40:25.706390 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerStarted","Data":"d524f25bcbe989bad1e6621dbaadd05dee093db288bfe1ef78f65ab19d34375c"} Nov 26 17:40:25 crc kubenswrapper[5010]: I1126 17:40:25.710614 5010 generic.go:334] "Generic (PLEG): container finished" podID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerID="438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0" exitCode=0 Nov 26 17:40:25 crc kubenswrapper[5010]: I1126 17:40:25.710653 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6qq4v" event={"ID":"2f9c3b7c-c68c-46d9-bb79-6843a982827f","Type":"ContainerDied","Data":"438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0"} Nov 26 17:40:25 crc kubenswrapper[5010]: I1126 17:40:25.710682 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6qq4v" event={"ID":"2f9c3b7c-c68c-46d9-bb79-6843a982827f","Type":"ContainerStarted","Data":"484d506f902a3dd1d6b6116ea27209f82abf30d6281fcb2208ffd5d6f0f93986"} Nov 26 17:40:26 crc kubenswrapper[5010]: I1126 17:40:26.722851 5010 generic.go:334] "Generic (PLEG): container finished" podID="53a81e07-adc4-4915-a1f1-871188924729" containerID="ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081" exitCode=0 Nov 26 17:40:26 crc kubenswrapper[5010]: I1126 17:40:26.722972 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerDied","Data":"ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081"} Nov 26 17:40:26 crc kubenswrapper[5010]: I1126 17:40:26.726225 5010 generic.go:334] "Generic (PLEG): container finished" podID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerID="1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c" exitCode=0 Nov 26 17:40:26 crc kubenswrapper[5010]: I1126 17:40:26.726268 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6qq4v" event={"ID":"2f9c3b7c-c68c-46d9-bb79-6843a982827f","Type":"ContainerDied","Data":"1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c"} Nov 26 
17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.721741 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-46sr9"] Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.726070 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.834806 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-46sr9"] Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.885850 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerStarted","Data":"89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f"} Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.897738 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-catalog-content\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.897849 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-utilities\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.897878 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8mkwc\" (UniqueName: \"kubernetes.io/projected/0881d7b0-51b7-4ef2-b913-d84ce615f12d-kube-api-access-8mkwc\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.899007 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:40:27 crc kubenswrapper[5010]: E1126 17:40:27.899257 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.934132 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6qq4v" event={"ID":"2f9c3b7c-c68c-46d9-bb79-6843a982827f","Type":"ContainerStarted","Data":"bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09"} Nov 26 17:40:27 crc kubenswrapper[5010]: I1126 17:40:27.965059 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6qq4v" podStartSLOduration=2.369443033 podStartE2EDuration="3.965041346s" podCreationTimestamp="2025-11-26 17:40:24 +0000 UTC" firstStartedPulling="2025-11-26 17:40:25.712840918 +0000 UTC m=+8046.503558066" lastFinishedPulling="2025-11-26 17:40:27.308439231 +0000 UTC 
m=+8048.099156379" observedRunningTime="2025-11-26 17:40:27.954856803 +0000 UTC m=+8048.745573961" watchObservedRunningTime="2025-11-26 17:40:27.965041346 +0000 UTC m=+8048.755758494" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.001319 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-utilities\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.001613 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8mkwc\" (UniqueName: \"kubernetes.io/projected/0881d7b0-51b7-4ef2-b913-d84ce615f12d-kube-api-access-8mkwc\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.001900 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-catalog-content\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.003802 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-catalog-content\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.003944 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-utilities\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.023059 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8mkwc\" (UniqueName: \"kubernetes.io/projected/0881d7b0-51b7-4ef2-b913-d84ce615f12d-kube-api-access-8mkwc\") pod \"community-operators-46sr9\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.067149 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.554821 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-46sr9"] Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.944230 5010 generic.go:334] "Generic (PLEG): container finished" podID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerID="d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe" exitCode=0 Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.944337 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerDied","Data":"d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe"} Nov 26 17:40:28 crc kubenswrapper[5010]: I1126 17:40:28.945656 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerStarted","Data":"4374971a2fad3fac4bd0ecc300a54a6cf2de813f4da9ba46a2636a17faf5b13b"} Nov 26 17:40:29 crc kubenswrapper[5010]: I1126 17:40:29.957014 5010 generic.go:334] "Generic (PLEG): container finished" podID="d007a94e-fb49-436e-b5ca-ae0c5e791540" containerID="d044b06e3e79c6ee885ad9b1fa1ffacf51b3dbcd492ac10ded88db0b05dd8d59" exitCode=0 Nov 26 17:40:29 crc kubenswrapper[5010]: I1126 17:40:29.957098 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" event={"ID":"d007a94e-fb49-436e-b5ca-ae0c5e791540","Type":"ContainerDied","Data":"d044b06e3e79c6ee885ad9b1fa1ffacf51b3dbcd492ac10ded88db0b05dd8d59"} Nov 26 17:40:30 crc kubenswrapper[5010]: I1126 17:40:30.969594 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerStarted","Data":"8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae"} Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.504642 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.524657 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6t8v\" (UniqueName: \"kubernetes.io/projected/d007a94e-fb49-436e-b5ca-ae0c5e791540-kube-api-access-n6t8v\") pod \"d007a94e-fb49-436e-b5ca-ae0c5e791540\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.524849 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-ssh-key\") pod \"d007a94e-fb49-436e-b5ca-ae0c5e791540\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.525119 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-inventory\") pod \"d007a94e-fb49-436e-b5ca-ae0c5e791540\" (UID: \"d007a94e-fb49-436e-b5ca-ae0c5e791540\") " Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.546683 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d007a94e-fb49-436e-b5ca-ae0c5e791540-kube-api-access-n6t8v" (OuterVolumeSpecName: "kube-api-access-n6t8v") pod "d007a94e-fb49-436e-b5ca-ae0c5e791540" (UID: "d007a94e-fb49-436e-b5ca-ae0c5e791540"). InnerVolumeSpecName "kube-api-access-n6t8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.563529 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d007a94e-fb49-436e-b5ca-ae0c5e791540" (UID: "d007a94e-fb49-436e-b5ca-ae0c5e791540"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.577941 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-inventory" (OuterVolumeSpecName: "inventory") pod "d007a94e-fb49-436e-b5ca-ae0c5e791540" (UID: "d007a94e-fb49-436e-b5ca-ae0c5e791540"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.627994 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6t8v\" (UniqueName: \"kubernetes.io/projected/d007a94e-fb49-436e-b5ca-ae0c5e791540-kube-api-access-n6t8v\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.628041 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.628055 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d007a94e-fb49-436e-b5ca-ae0c5e791540-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.983062 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" event={"ID":"d007a94e-fb49-436e-b5ca-ae0c5e791540","Type":"ContainerDied","Data":"0a83cc5f2ebc70b0b2775f3b42b38fcb79b5134d400c137a08f58dd128578af9"} Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.983362 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a83cc5f2ebc70b0b2775f3b42b38fcb79b5134d400c137a08f58dd128578af9" Nov 26 17:40:31 crc kubenswrapper[5010]: I1126 17:40:31.984511 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-openstack-openstack-cell1-fxj9x" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.069194 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-openstack-j7psw"] Nov 26 17:40:32 crc kubenswrapper[5010]: E1126 17:40:32.069673 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d007a94e-fb49-436e-b5ca-ae0c5e791540" containerName="configure-os-openstack-openstack-cell1" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.069690 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d007a94e-fb49-436e-b5ca-ae0c5e791540" containerName="configure-os-openstack-openstack-cell1" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.069954 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d007a94e-fb49-436e-b5ca-ae0c5e791540" containerName="configure-os-openstack-openstack-cell1" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.070734 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.073955 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.074225 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.074553 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.074812 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.097365 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-j7psw"] Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.141580 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-68drs\" (UniqueName: \"kubernetes.io/projected/2541fd34-09c5-44d6-aad0-f308b87d63aa-kube-api-access-68drs\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.141654 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-inventory-0\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.141722 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.243458 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-68drs\" (UniqueName: \"kubernetes.io/projected/2541fd34-09c5-44d6-aad0-f308b87d63aa-kube-api-access-68drs\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.243539 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-inventory-0\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.243599 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.248137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" 
(UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-inventory-0\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.257756 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-ssh-key-openstack-cell1\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.260868 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-68drs\" (UniqueName: \"kubernetes.io/projected/2541fd34-09c5-44d6-aad0-f308b87d63aa-kube-api-access-68drs\") pod \"ssh-known-hosts-openstack-j7psw\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.398491 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.996633 5010 generic.go:334] "Generic (PLEG): container finished" podID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerID="8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae" exitCode=0 Nov 26 17:40:32 crc kubenswrapper[5010]: I1126 17:40:32.996697 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerDied","Data":"8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae"} Nov 26 17:40:33 crc kubenswrapper[5010]: I1126 17:40:33.096289 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-openstack-j7psw"] Nov 26 17:40:33 crc kubenswrapper[5010]: W1126 17:40:33.144448 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2541fd34_09c5_44d6_aad0_f308b87d63aa.slice/crio-236a13ddd50b7054b9accb7098a98c7fa96719df016c51f7ed0a33f62f174767 WatchSource:0}: Error finding container 236a13ddd50b7054b9accb7098a98c7fa96719df016c51f7ed0a33f62f174767: Status 404 returned error can't find the container with id 236a13ddd50b7054b9accb7098a98c7fa96719df016c51f7ed0a33f62f174767 Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.019205 5010 generic.go:334] "Generic (PLEG): container finished" podID="53a81e07-adc4-4915-a1f1-871188924729" containerID="89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f" exitCode=0 Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.019591 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerDied","Data":"89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f"} Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.021691 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-j7psw" event={"ID":"2541fd34-09c5-44d6-aad0-f308b87d63aa","Type":"ContainerStarted","Data":"324b85fbc0ca68c98f8d5e4795d70ed059fd70f20fa4079c1a406d7d3a9e07e0"} Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.021757 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ssh-known-hosts-openstack-j7psw" event={"ID":"2541fd34-09c5-44d6-aad0-f308b87d63aa","Type":"ContainerStarted","Data":"236a13ddd50b7054b9accb7098a98c7fa96719df016c51f7ed0a33f62f174767"} Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.067944 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-openstack-j7psw" podStartSLOduration=1.679112889 podStartE2EDuration="2.067923554s" podCreationTimestamp="2025-11-26 17:40:32 +0000 UTC" firstStartedPulling="2025-11-26 17:40:33.147335014 +0000 UTC m=+8053.938052182" lastFinishedPulling="2025-11-26 17:40:33.536145699 +0000 UTC m=+8054.326862847" observedRunningTime="2025-11-26 17:40:34.060073639 +0000 UTC m=+8054.850790797" watchObservedRunningTime="2025-11-26 17:40:34.067923554 +0000 UTC m=+8054.858640722" Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.665159 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.665542 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:34 crc kubenswrapper[5010]: I1126 17:40:34.730746 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:35 crc kubenswrapper[5010]: I1126 17:40:35.032955 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerStarted","Data":"ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73"} Nov 26 17:40:35 crc kubenswrapper[5010]: I1126 17:40:35.035825 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerStarted","Data":"bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf"} Nov 26 17:40:35 crc kubenswrapper[5010]: I1126 17:40:35.084696 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-46sr9" podStartSLOduration=3.105294293 podStartE2EDuration="8.084676333s" podCreationTimestamp="2025-11-26 17:40:27 +0000 UTC" firstStartedPulling="2025-11-26 17:40:28.94744611 +0000 UTC m=+8049.738163258" lastFinishedPulling="2025-11-26 17:40:33.92682815 +0000 UTC m=+8054.717545298" observedRunningTime="2025-11-26 17:40:35.077955066 +0000 UTC m=+8055.868672224" watchObservedRunningTime="2025-11-26 17:40:35.084676333 +0000 UTC m=+8055.875393481" Nov 26 17:40:35 crc kubenswrapper[5010]: I1126 17:40:35.090448 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:35 crc kubenswrapper[5010]: I1126 17:40:35.091977 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7xmsf" podStartSLOduration=3.338930678 podStartE2EDuration="11.091957954s" podCreationTimestamp="2025-11-26 17:40:24 +0000 UTC" firstStartedPulling="2025-11-26 17:40:26.724955621 +0000 UTC m=+8047.515672769" lastFinishedPulling="2025-11-26 17:40:34.477982887 +0000 UTC m=+8055.268700045" observedRunningTime="2025-11-26 17:40:35.052213147 +0000 UTC m=+8055.842930295" watchObservedRunningTime="2025-11-26 17:40:35.091957954 +0000 UTC m=+8055.882675102" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 
17:40:37.084627 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6qq4v"] Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.085154 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6qq4v" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="registry-server" containerID="cri-o://bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09" gracePeriod=2 Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.721306 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.799772 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-utilities\") pod \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.800128 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nl2tp\" (UniqueName: \"kubernetes.io/projected/2f9c3b7c-c68c-46d9-bb79-6843a982827f-kube-api-access-nl2tp\") pod \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.801220 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-catalog-content\") pod \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\" (UID: \"2f9c3b7c-c68c-46d9-bb79-6843a982827f\") " Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.806613 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f9c3b7c-c68c-46d9-bb79-6843a982827f-kube-api-access-nl2tp" (OuterVolumeSpecName: "kube-api-access-nl2tp") pod "2f9c3b7c-c68c-46d9-bb79-6843a982827f" (UID: "2f9c3b7c-c68c-46d9-bb79-6843a982827f"). InnerVolumeSpecName "kube-api-access-nl2tp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.810136 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-utilities" (OuterVolumeSpecName: "utilities") pod "2f9c3b7c-c68c-46d9-bb79-6843a982827f" (UID: "2f9c3b7c-c68c-46d9-bb79-6843a982827f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.893038 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f9c3b7c-c68c-46d9-bb79-6843a982827f" (UID: "2f9c3b7c-c68c-46d9-bb79-6843a982827f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.904839 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nl2tp\" (UniqueName: \"kubernetes.io/projected/2f9c3b7c-c68c-46d9-bb79-6843a982827f-kube-api-access-nl2tp\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.904877 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:37 crc kubenswrapper[5010]: I1126 17:40:37.904889 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9c3b7c-c68c-46d9-bb79-6843a982827f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.068614 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.068657 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.086360 5010 generic.go:334] "Generic (PLEG): container finished" podID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerID="bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09" exitCode=0 Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.086416 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6qq4v" event={"ID":"2f9c3b7c-c68c-46d9-bb79-6843a982827f","Type":"ContainerDied","Data":"bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09"} Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.086466 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6qq4v" event={"ID":"2f9c3b7c-c68c-46d9-bb79-6843a982827f","Type":"ContainerDied","Data":"484d506f902a3dd1d6b6116ea27209f82abf30d6281fcb2208ffd5d6f0f93986"} Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.086503 5010 scope.go:117] "RemoveContainer" containerID="bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.086828 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6qq4v" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.128899 5010 scope.go:117] "RemoveContainer" containerID="1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.130362 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.137543 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6qq4v"] Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.147179 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6qq4v"] Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.153655 5010 scope.go:117] "RemoveContainer" containerID="438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.220014 5010 scope.go:117] "RemoveContainer" containerID="bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09" Nov 26 17:40:38 crc kubenswrapper[5010]: E1126 17:40:38.220558 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09\": container with ID starting with bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09 not found: ID does not exist" containerID="bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.220600 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09"} err="failed to get container status \"bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09\": rpc error: code = NotFound desc = could not find container \"bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09\": container with ID starting with bb102936ddada24d46ab88466fbbd229985ae13d88955704f22ec7f08a035c09 not found: ID does not exist" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.220837 5010 scope.go:117] "RemoveContainer" containerID="1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c" Nov 26 17:40:38 crc kubenswrapper[5010]: E1126 17:40:38.222097 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c\": container with ID starting with 1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c not found: ID does not exist" containerID="1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.222200 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c"} err="failed to get container status \"1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c\": rpc error: code = NotFound desc = could not find container \"1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c\": container with ID starting with 1cc41a59abaff45aa88d233a829e44a1ade1eb93f9f60d4e1008db70727b083c not found: ID does not exist" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.222282 5010 scope.go:117] "RemoveContainer" 
containerID="438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0" Nov 26 17:40:38 crc kubenswrapper[5010]: E1126 17:40:38.222857 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0\": container with ID starting with 438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0 not found: ID does not exist" containerID="438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0" Nov 26 17:40:38 crc kubenswrapper[5010]: I1126 17:40:38.222892 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0"} err="failed to get container status \"438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0\": rpc error: code = NotFound desc = could not find container \"438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0\": container with ID starting with 438597a0e2459edc2f72d352ceb54b93a6fd438afa9a46caec97fed3ecd210e0 not found: ID does not exist" Nov 26 17:40:39 crc kubenswrapper[5010]: I1126 17:40:39.162636 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:39 crc kubenswrapper[5010]: I1126 17:40:39.907210 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" path="/var/lib/kubelet/pods/2f9c3b7c-c68c-46d9-bb79-6843a982827f/volumes" Nov 26 17:40:41 crc kubenswrapper[5010]: I1126 17:40:41.494413 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-46sr9"] Nov 26 17:40:41 crc kubenswrapper[5010]: I1126 17:40:41.495048 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-46sr9" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="registry-server" containerID="cri-o://bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf" gracePeriod=2 Nov 26 17:40:41 crc kubenswrapper[5010]: E1126 17:40:41.695806 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0881d7b0_51b7_4ef2_b913_d84ce615f12d.slice/crio-bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0881d7b0_51b7_4ef2_b913_d84ce615f12d.slice/crio-conmon-bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf.scope\": RecentStats: unable to find data in memory cache]" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.032300 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.110955 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-utilities\") pod \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.111186 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-catalog-content\") pod \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.111230 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8mkwc\" (UniqueName: \"kubernetes.io/projected/0881d7b0-51b7-4ef2-b913-d84ce615f12d-kube-api-access-8mkwc\") pod \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\" (UID: \"0881d7b0-51b7-4ef2-b913-d84ce615f12d\") " Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.111315 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-utilities" (OuterVolumeSpecName: "utilities") pod "0881d7b0-51b7-4ef2-b913-d84ce615f12d" (UID: "0881d7b0-51b7-4ef2-b913-d84ce615f12d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.112519 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.127076 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0881d7b0-51b7-4ef2-b913-d84ce615f12d-kube-api-access-8mkwc" (OuterVolumeSpecName: "kube-api-access-8mkwc") pod "0881d7b0-51b7-4ef2-b913-d84ce615f12d" (UID: "0881d7b0-51b7-4ef2-b913-d84ce615f12d"). InnerVolumeSpecName "kube-api-access-8mkwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.145779 5010 generic.go:334] "Generic (PLEG): container finished" podID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerID="bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf" exitCode=0 Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.145829 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerDied","Data":"bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf"} Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.145859 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-46sr9" event={"ID":"0881d7b0-51b7-4ef2-b913-d84ce615f12d","Type":"ContainerDied","Data":"4374971a2fad3fac4bd0ecc300a54a6cf2de813f4da9ba46a2636a17faf5b13b"} Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.145881 5010 scope.go:117] "RemoveContainer" containerID="bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.145882 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-46sr9" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.181555 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0881d7b0-51b7-4ef2-b913-d84ce615f12d" (UID: "0881d7b0-51b7-4ef2-b913-d84ce615f12d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.195805 5010 scope.go:117] "RemoveContainer" containerID="8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.214390 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0881d7b0-51b7-4ef2-b913-d84ce615f12d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.214418 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8mkwc\" (UniqueName: \"kubernetes.io/projected/0881d7b0-51b7-4ef2-b913-d84ce615f12d-kube-api-access-8mkwc\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.217763 5010 scope.go:117] "RemoveContainer" containerID="d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.267136 5010 scope.go:117] "RemoveContainer" containerID="bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf" Nov 26 17:40:42 crc kubenswrapper[5010]: E1126 17:40:42.267720 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf\": container with ID starting with bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf not found: ID does not exist" containerID="bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.267774 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf"} err="failed to get container status \"bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf\": rpc error: code = NotFound desc = could not find container \"bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf\": container with ID starting with bd938a24292e48053cf704db7d90916270a4512f85f3af4a30d12e8c162b29bf not found: ID does not exist" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.267806 5010 scope.go:117] "RemoveContainer" containerID="8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae" Nov 26 17:40:42 crc kubenswrapper[5010]: E1126 17:40:42.268271 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae\": container with ID starting with 8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae not found: ID does not exist" containerID="8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.268298 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae"} 
err="failed to get container status \"8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae\": rpc error: code = NotFound desc = could not find container \"8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae\": container with ID starting with 8b861c47ff83cadecb2cf14f103c9370a7d67b38fb703fc4e75f0dbe182abbae not found: ID does not exist" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.268318 5010 scope.go:117] "RemoveContainer" containerID="d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe" Nov 26 17:40:42 crc kubenswrapper[5010]: E1126 17:40:42.268992 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe\": container with ID starting with d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe not found: ID does not exist" containerID="d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.269030 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe"} err="failed to get container status \"d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe\": rpc error: code = NotFound desc = could not find container \"d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe\": container with ID starting with d367730a0b923e5248cec22ac9b26b8abb0d9ad34d0e67663fa6cc99cf7aeefe not found: ID does not exist" Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.499323 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-46sr9"] Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.509110 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-46sr9"] Nov 26 17:40:42 crc kubenswrapper[5010]: I1126 17:40:42.892008 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:40:43 crc kubenswrapper[5010]: I1126 17:40:43.159864 5010 generic.go:334] "Generic (PLEG): container finished" podID="2541fd34-09c5-44d6-aad0-f308b87d63aa" containerID="324b85fbc0ca68c98f8d5e4795d70ed059fd70f20fa4079c1a406d7d3a9e07e0" exitCode=0 Nov 26 17:40:43 crc kubenswrapper[5010]: I1126 17:40:43.160000 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-j7psw" event={"ID":"2541fd34-09c5-44d6-aad0-f308b87d63aa","Type":"ContainerDied","Data":"324b85fbc0ca68c98f8d5e4795d70ed059fd70f20fa4079c1a406d7d3a9e07e0"} Nov 26 17:40:43 crc kubenswrapper[5010]: I1126 17:40:43.165937 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"1e137fb0baffea03d92cc4f06527eedfbf4028b857c4698a78480b1bc12370d8"} Nov 26 17:40:43 crc kubenswrapper[5010]: I1126 17:40:43.922524 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" path="/var/lib/kubelet/pods/0881d7b0-51b7-4ef2-b913-d84ce615f12d/volumes" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.636179 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.673960 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-inventory-0\") pod \"2541fd34-09c5-44d6-aad0-f308b87d63aa\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.674040 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-ssh-key-openstack-cell1\") pod \"2541fd34-09c5-44d6-aad0-f308b87d63aa\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.674187 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-68drs\" (UniqueName: \"kubernetes.io/projected/2541fd34-09c5-44d6-aad0-f308b87d63aa-kube-api-access-68drs\") pod \"2541fd34-09c5-44d6-aad0-f308b87d63aa\" (UID: \"2541fd34-09c5-44d6-aad0-f308b87d63aa\") " Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.682046 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2541fd34-09c5-44d6-aad0-f308b87d63aa-kube-api-access-68drs" (OuterVolumeSpecName: "kube-api-access-68drs") pod "2541fd34-09c5-44d6-aad0-f308b87d63aa" (UID: "2541fd34-09c5-44d6-aad0-f308b87d63aa"). InnerVolumeSpecName "kube-api-access-68drs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.733680 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "2541fd34-09c5-44d6-aad0-f308b87d63aa" (UID: "2541fd34-09c5-44d6-aad0-f308b87d63aa"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.735911 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-ssh-key-openstack-cell1" (OuterVolumeSpecName: "ssh-key-openstack-cell1") pod "2541fd34-09c5-44d6-aad0-f308b87d63aa" (UID: "2541fd34-09c5-44d6-aad0-f308b87d63aa"). InnerVolumeSpecName "ssh-key-openstack-cell1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.778547 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-68drs\" (UniqueName: \"kubernetes.io/projected/2541fd34-09c5-44d6-aad0-f308b87d63aa-kube-api-access-68drs\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.778904 5010 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.778918 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-cell1\" (UniqueName: \"kubernetes.io/secret/2541fd34-09c5-44d6-aad0-f308b87d63aa-ssh-key-openstack-cell1\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.843007 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.843083 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:44 crc kubenswrapper[5010]: I1126 17:40:44.918988 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.194641 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-openstack-j7psw" event={"ID":"2541fd34-09c5-44d6-aad0-f308b87d63aa","Type":"ContainerDied","Data":"236a13ddd50b7054b9accb7098a98c7fa96719df016c51f7ed0a33f62f174767"} Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.194694 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-openstack-j7psw" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.194777 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="236a13ddd50b7054b9accb7098a98c7fa96719df016c51f7ed0a33f62f174767" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.254433 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-openstack-openstack-cell1-dzfdn"] Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.254994 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="extract-content" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255010 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="extract-content" Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.255039 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="registry-server" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255046 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="registry-server" Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.255065 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="extract-content" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255072 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="extract-content" Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.255085 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="registry-server" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255091 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="registry-server" Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.255116 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="extract-utilities" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255124 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="extract-utilities" Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.255144 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="extract-utilities" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255151 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="extract-utilities" Nov 26 17:40:45 crc kubenswrapper[5010]: E1126 17:40:45.255160 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2541fd34-09c5-44d6-aad0-f308b87d63aa" containerName="ssh-known-hosts-openstack" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255165 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="2541fd34-09c5-44d6-aad0-f308b87d63aa" containerName="ssh-known-hosts-openstack" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255357 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0881d7b0-51b7-4ef2-b913-d84ce615f12d" containerName="registry-server" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255379 5010 
memory_manager.go:354] "RemoveStaleState removing state" podUID="2f9c3b7c-c68c-46d9-bb79-6843a982827f" containerName="registry-server" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.255391 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="2541fd34-09c5-44d6-aad0-f308b87d63aa" containerName="ssh-known-hosts-openstack" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.256248 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.259734 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.259941 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.261489 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.261516 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.272769 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-dzfdn"] Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.300749 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7ds6d\" (UniqueName: \"kubernetes.io/projected/78e89a72-3a79-4431-aa18-ea2e358242ec-kube-api-access-7ds6d\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.300853 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-inventory\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.300936 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-ssh-key\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.310995 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.402553 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-ssh-key\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.403046 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7ds6d\" (UniqueName: \"kubernetes.io/projected/78e89a72-3a79-4431-aa18-ea2e358242ec-kube-api-access-7ds6d\") pod 
\"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.403161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-inventory\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.406606 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-ssh-key\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.410537 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-inventory\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.420470 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7ds6d\" (UniqueName: \"kubernetes.io/projected/78e89a72-3a79-4431-aa18-ea2e358242ec-kube-api-access-7ds6d\") pod \"run-os-openstack-openstack-cell1-dzfdn\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.636575 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:45 crc kubenswrapper[5010]: I1126 17:40:45.705250 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7xmsf"] Nov 26 17:40:46 crc kubenswrapper[5010]: I1126 17:40:46.249235 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-openstack-openstack-cell1-dzfdn"] Nov 26 17:40:46 crc kubenswrapper[5010]: I1126 17:40:46.255019 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.214215 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" event={"ID":"78e89a72-3a79-4431-aa18-ea2e358242ec","Type":"ContainerStarted","Data":"73025054788ee901182addb2045d90a2d1fd4f2003d261331462563832399c6f"} Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.214827 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" event={"ID":"78e89a72-3a79-4431-aa18-ea2e358242ec","Type":"ContainerStarted","Data":"6fe1aea72c04bdde6430c34a88598f63585846bf2fe8851002da694d52fecf66"} Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.214279 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7xmsf" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="registry-server" containerID="cri-o://ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73" gracePeriod=2 Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.238180 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" podStartSLOduration=1.708868511 podStartE2EDuration="2.238156174s" podCreationTimestamp="2025-11-26 17:40:45 +0000 UTC" firstStartedPulling="2025-11-26 17:40:46.25459295 +0000 UTC m=+8067.045310098" lastFinishedPulling="2025-11-26 17:40:46.783880613 +0000 UTC m=+8067.574597761" observedRunningTime="2025-11-26 17:40:47.231703693 +0000 UTC m=+8068.022420841" watchObservedRunningTime="2025-11-26 17:40:47.238156174 +0000 UTC m=+8068.028873322" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.686677 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.752657 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-catalog-content\") pod \"53a81e07-adc4-4915-a1f1-871188924729\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.752888 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgvv2\" (UniqueName: \"kubernetes.io/projected/53a81e07-adc4-4915-a1f1-871188924729-kube-api-access-mgvv2\") pod \"53a81e07-adc4-4915-a1f1-871188924729\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.753131 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-utilities\") pod \"53a81e07-adc4-4915-a1f1-871188924729\" (UID: \"53a81e07-adc4-4915-a1f1-871188924729\") " Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.754830 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-utilities" (OuterVolumeSpecName: "utilities") pod "53a81e07-adc4-4915-a1f1-871188924729" (UID: "53a81e07-adc4-4915-a1f1-871188924729"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.768248 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53a81e07-adc4-4915-a1f1-871188924729-kube-api-access-mgvv2" (OuterVolumeSpecName: "kube-api-access-mgvv2") pod "53a81e07-adc4-4915-a1f1-871188924729" (UID: "53a81e07-adc4-4915-a1f1-871188924729"). InnerVolumeSpecName "kube-api-access-mgvv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.855946 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.856235 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgvv2\" (UniqueName: \"kubernetes.io/projected/53a81e07-adc4-4915-a1f1-871188924729-kube-api-access-mgvv2\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.864378 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "53a81e07-adc4-4915-a1f1-871188924729" (UID: "53a81e07-adc4-4915-a1f1-871188924729"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:40:47 crc kubenswrapper[5010]: I1126 17:40:47.958161 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53a81e07-adc4-4915-a1f1-871188924729-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.228404 5010 generic.go:334] "Generic (PLEG): container finished" podID="53a81e07-adc4-4915-a1f1-871188924729" containerID="ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73" exitCode=0 Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.229109 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7xmsf" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.229684 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerDied","Data":"ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73"} Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.229731 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7xmsf" event={"ID":"53a81e07-adc4-4915-a1f1-871188924729","Type":"ContainerDied","Data":"d524f25bcbe989bad1e6621dbaadd05dee093db288bfe1ef78f65ab19d34375c"} Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.229751 5010 scope.go:117] "RemoveContainer" containerID="ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.254420 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7xmsf"] Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.263235 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7xmsf"] Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.269829 5010 scope.go:117] "RemoveContainer" containerID="89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.305575 5010 scope.go:117] "RemoveContainer" containerID="ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.368336 5010 scope.go:117] "RemoveContainer" containerID="ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73" Nov 26 17:40:48 crc kubenswrapper[5010]: E1126 17:40:48.368905 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73\": container with ID starting with ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73 not found: ID does not exist" containerID="ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.368953 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73"} err="failed to get container status \"ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73\": rpc error: code = NotFound desc = could not find container \"ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73\": container with ID starting with ac150d4052a3031add3c5f7e5acdc9b32a0bf947767de45bec71c17e51bc3e73 not found: ID does not exist" Nov 26 17:40:48 crc 
kubenswrapper[5010]: I1126 17:40:48.368979 5010 scope.go:117] "RemoveContainer" containerID="89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f" Nov 26 17:40:48 crc kubenswrapper[5010]: E1126 17:40:48.369454 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f\": container with ID starting with 89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f not found: ID does not exist" containerID="89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.369495 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f"} err="failed to get container status \"89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f\": rpc error: code = NotFound desc = could not find container \"89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f\": container with ID starting with 89a5cc375ebc3586220a0f5e88138366466822f8dd04bb4a29b586b944f2075f not found: ID does not exist" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.369524 5010 scope.go:117] "RemoveContainer" containerID="ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081" Nov 26 17:40:48 crc kubenswrapper[5010]: E1126 17:40:48.369916 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081\": container with ID starting with ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081 not found: ID does not exist" containerID="ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081" Nov 26 17:40:48 crc kubenswrapper[5010]: I1126 17:40:48.369968 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081"} err="failed to get container status \"ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081\": rpc error: code = NotFound desc = could not find container \"ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081\": container with ID starting with ee8864b8d7e4e5f146112593f35f9a1e2b895f9aefd3155ff2fb437c75553081 not found: ID does not exist" Nov 26 17:40:49 crc kubenswrapper[5010]: I1126 17:40:49.906611 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53a81e07-adc4-4915-a1f1-871188924729" path="/var/lib/kubelet/pods/53a81e07-adc4-4915-a1f1-871188924729/volumes" Nov 26 17:40:55 crc kubenswrapper[5010]: I1126 17:40:55.313191 5010 generic.go:334] "Generic (PLEG): container finished" podID="78e89a72-3a79-4431-aa18-ea2e358242ec" containerID="73025054788ee901182addb2045d90a2d1fd4f2003d261331462563832399c6f" exitCode=0 Nov 26 17:40:55 crc kubenswrapper[5010]: I1126 17:40:55.313276 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" event={"ID":"78e89a72-3a79-4431-aa18-ea2e358242ec","Type":"ContainerDied","Data":"73025054788ee901182addb2045d90a2d1fd4f2003d261331462563832399c6f"} Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.823121 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.884963 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-inventory\") pod \"78e89a72-3a79-4431-aa18-ea2e358242ec\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.885132 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7ds6d\" (UniqueName: \"kubernetes.io/projected/78e89a72-3a79-4431-aa18-ea2e358242ec-kube-api-access-7ds6d\") pod \"78e89a72-3a79-4431-aa18-ea2e358242ec\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.886474 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-ssh-key\") pod \"78e89a72-3a79-4431-aa18-ea2e358242ec\" (UID: \"78e89a72-3a79-4431-aa18-ea2e358242ec\") " Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.891387 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78e89a72-3a79-4431-aa18-ea2e358242ec-kube-api-access-7ds6d" (OuterVolumeSpecName: "kube-api-access-7ds6d") pod "78e89a72-3a79-4431-aa18-ea2e358242ec" (UID: "78e89a72-3a79-4431-aa18-ea2e358242ec"). InnerVolumeSpecName "kube-api-access-7ds6d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.915897 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-inventory" (OuterVolumeSpecName: "inventory") pod "78e89a72-3a79-4431-aa18-ea2e358242ec" (UID: "78e89a72-3a79-4431-aa18-ea2e358242ec"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.917686 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "78e89a72-3a79-4431-aa18-ea2e358242ec" (UID: "78e89a72-3a79-4431-aa18-ea2e358242ec"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.990787 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.990845 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/78e89a72-3a79-4431-aa18-ea2e358242ec-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:56 crc kubenswrapper[5010]: I1126 17:40:56.990856 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7ds6d\" (UniqueName: \"kubernetes.io/projected/78e89a72-3a79-4431-aa18-ea2e358242ec-kube-api-access-7ds6d\") on node \"crc\" DevicePath \"\"" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.341091 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" event={"ID":"78e89a72-3a79-4431-aa18-ea2e358242ec","Type":"ContainerDied","Data":"6fe1aea72c04bdde6430c34a88598f63585846bf2fe8851002da694d52fecf66"} Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.341177 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-openstack-openstack-cell1-dzfdn" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.344843 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fe1aea72c04bdde6430c34a88598f63585846bf2fe8851002da694d52fecf66" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.447583 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-9qjx4"] Nov 26 17:40:57 crc kubenswrapper[5010]: E1126 17:40:57.448442 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="extract-content" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.448489 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="extract-content" Nov 26 17:40:57 crc kubenswrapper[5010]: E1126 17:40:57.448523 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="registry-server" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.448542 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="registry-server" Nov 26 17:40:57 crc kubenswrapper[5010]: E1126 17:40:57.448591 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="extract-utilities" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.448612 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="extract-utilities" Nov 26 17:40:57 crc kubenswrapper[5010]: E1126 17:40:57.448668 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78e89a72-3a79-4431-aa18-ea2e358242ec" containerName="run-os-openstack-openstack-cell1" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.448689 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="78e89a72-3a79-4431-aa18-ea2e358242ec" containerName="run-os-openstack-openstack-cell1" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.449300 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="78e89a72-3a79-4431-aa18-ea2e358242ec" 
containerName="run-os-openstack-openstack-cell1" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.449337 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="53a81e07-adc4-4915-a1f1-871188924729" containerName="registry-server" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.450837 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.455473 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.455687 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.456693 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.456912 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.461168 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-9qjx4"] Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.501479 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-inventory\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.501689 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.501764 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b942k\" (UniqueName: \"kubernetes.io/projected/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-kube-api-access-b942k\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.605069 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b942k\" (UniqueName: \"kubernetes.io/projected/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-kube-api-access-b942k\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.606167 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-inventory\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.607141 5010 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.636735 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-inventory\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.637193 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-ssh-key\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.639499 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b942k\" (UniqueName: \"kubernetes.io/projected/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-kube-api-access-b942k\") pod \"reboot-os-openstack-openstack-cell1-9qjx4\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:57 crc kubenswrapper[5010]: I1126 17:40:57.780948 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:40:58 crc kubenswrapper[5010]: I1126 17:40:58.322350 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-openstack-openstack-cell1-9qjx4"] Nov 26 17:40:58 crc kubenswrapper[5010]: I1126 17:40:58.355263 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" event={"ID":"3fc3e158-6b98-4a72-85b5-a50aad4fe33e","Type":"ContainerStarted","Data":"b8135b7a2ed0c93733740d4182f35f2c50f925846a50eb7604d5d57059acf17e"} Nov 26 17:40:59 crc kubenswrapper[5010]: I1126 17:40:59.368055 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" event={"ID":"3fc3e158-6b98-4a72-85b5-a50aad4fe33e","Type":"ContainerStarted","Data":"27e6c39ec633cb4e1f897dd061a9ac4ea37570d481cbca7841ebb1c2a7ccd8b5"} Nov 26 17:40:59 crc kubenswrapper[5010]: I1126 17:40:59.383461 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" podStartSLOduration=1.91833695 podStartE2EDuration="2.38343724s" podCreationTimestamp="2025-11-26 17:40:57 +0000 UTC" firstStartedPulling="2025-11-26 17:40:58.330238446 +0000 UTC m=+8079.120955594" lastFinishedPulling="2025-11-26 17:40:58.795338716 +0000 UTC m=+8079.586055884" observedRunningTime="2025-11-26 17:40:59.38343175 +0000 UTC m=+8080.174148898" watchObservedRunningTime="2025-11-26 17:40:59.38343724 +0000 UTC m=+8080.174154388" Nov 26 17:41:15 crc kubenswrapper[5010]: I1126 17:41:15.526974 5010 generic.go:334] "Generic (PLEG): container finished" podID="3fc3e158-6b98-4a72-85b5-a50aad4fe33e" containerID="27e6c39ec633cb4e1f897dd061a9ac4ea37570d481cbca7841ebb1c2a7ccd8b5" exitCode=0 Nov 26 17:41:15 crc kubenswrapper[5010]: I1126 17:41:15.527038 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" event={"ID":"3fc3e158-6b98-4a72-85b5-a50aad4fe33e","Type":"ContainerDied","Data":"27e6c39ec633cb4e1f897dd061a9ac4ea37570d481cbca7841ebb1c2a7ccd8b5"} Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.028951 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.072250 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-inventory\") pod \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.072297 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-ssh-key\") pod \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.072419 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b942k\" (UniqueName: \"kubernetes.io/projected/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-kube-api-access-b942k\") pod \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\" (UID: \"3fc3e158-6b98-4a72-85b5-a50aad4fe33e\") " Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.081076 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-kube-api-access-b942k" (OuterVolumeSpecName: "kube-api-access-b942k") pod "3fc3e158-6b98-4a72-85b5-a50aad4fe33e" (UID: "3fc3e158-6b98-4a72-85b5-a50aad4fe33e"). InnerVolumeSpecName "kube-api-access-b942k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.107846 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3fc3e158-6b98-4a72-85b5-a50aad4fe33e" (UID: "3fc3e158-6b98-4a72-85b5-a50aad4fe33e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.124479 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-inventory" (OuterVolumeSpecName: "inventory") pod "3fc3e158-6b98-4a72-85b5-a50aad4fe33e" (UID: "3fc3e158-6b98-4a72-85b5-a50aad4fe33e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.175477 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.175774 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.175940 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b942k\" (UniqueName: \"kubernetes.io/projected/3fc3e158-6b98-4a72-85b5-a50aad4fe33e-kube-api-access-b942k\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.548694 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" event={"ID":"3fc3e158-6b98-4a72-85b5-a50aad4fe33e","Type":"ContainerDied","Data":"b8135b7a2ed0c93733740d4182f35f2c50f925846a50eb7604d5d57059acf17e"} Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.548790 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8135b7a2ed0c93733740d4182f35f2c50f925846a50eb7604d5d57059acf17e" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.548864 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-openstack-openstack-cell1-9qjx4" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.666192 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-jcr48"] Nov 26 17:41:17 crc kubenswrapper[5010]: E1126 17:41:17.666704 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fc3e158-6b98-4a72-85b5-a50aad4fe33e" containerName="reboot-os-openstack-openstack-cell1" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.666738 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fc3e158-6b98-4a72-85b5-a50aad4fe33e" containerName="reboot-os-openstack-openstack-cell1" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.666969 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fc3e158-6b98-4a72-85b5-a50aad4fe33e" containerName="reboot-os-openstack-openstack-cell1" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.667774 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.672536 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-libvirt-default-certs-0" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.672799 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.673044 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.673331 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-neutron-metadata-default-certs-0" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.673766 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.674026 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-ovn-default-certs-0" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.674231 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-telemetry-default-certs-0" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.674499 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.684083 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-jcr48"] Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.688815 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.688882 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.688935 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ssh-key\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.688961 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" 
Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.688984 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689000 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689054 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689107 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l79lh\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-kube-api-access-l79lh\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689139 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689162 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689180 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689197 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-inventory\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689277 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689301 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.689317 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790540 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790587 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790607 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790645 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " 
pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790688 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790727 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ssh-key\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790749 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790773 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790788 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790808 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790884 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l79lh\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-kube-api-access-l79lh\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790925 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " 
pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790947 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790964 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-inventory\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.790980 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.795695 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-metadata-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.795905 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-sriov-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.796076 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-bootstrap-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.796836 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-nova-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.796917 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-inventory\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " 
pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.797116 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ssh-key\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.797925 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-libvirt-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.799501 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-telemetry-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.800556 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-libvirt-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.800595 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-dhcp-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.802550 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-telemetry-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.802951 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-neutron-metadata-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.803505 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ovn-combined-ca-bundle\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: 
\"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.803638 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-ovn-default-certs-0\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:17 crc kubenswrapper[5010]: I1126 17:41:17.808621 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l79lh\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-kube-api-access-l79lh\") pod \"install-certs-openstack-openstack-cell1-jcr48\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:18 crc kubenswrapper[5010]: I1126 17:41:18.007808 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:18 crc kubenswrapper[5010]: I1126 17:41:18.744846 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-openstack-openstack-cell1-jcr48"] Nov 26 17:41:19 crc kubenswrapper[5010]: I1126 17:41:19.567789 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" event={"ID":"ed349151-ee95-4152-bff2-a9607e724140","Type":"ContainerStarted","Data":"3944e9b2edcebbb56dd4f55a2df66bcaf8ddee82aceb6c2bdb208b8306b05d59"} Nov 26 17:41:19 crc kubenswrapper[5010]: I1126 17:41:19.568379 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" event={"ID":"ed349151-ee95-4152-bff2-a9607e724140","Type":"ContainerStarted","Data":"917a08e048b183b4782495ee62cb3a96ed9b96962a0f861ee639d60588e63231"} Nov 26 17:41:19 crc kubenswrapper[5010]: I1126 17:41:19.588762 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" podStartSLOduration=2.122923422 podStartE2EDuration="2.588745231s" podCreationTimestamp="2025-11-26 17:41:17 +0000 UTC" firstStartedPulling="2025-11-26 17:41:18.761680273 +0000 UTC m=+8099.552397421" lastFinishedPulling="2025-11-26 17:41:19.227502082 +0000 UTC m=+8100.018219230" observedRunningTime="2025-11-26 17:41:19.586765652 +0000 UTC m=+8100.377482820" watchObservedRunningTime="2025-11-26 17:41:19.588745231 +0000 UTC m=+8100.379462389" Nov 26 17:41:58 crc kubenswrapper[5010]: I1126 17:41:58.011514 5010 generic.go:334] "Generic (PLEG): container finished" podID="ed349151-ee95-4152-bff2-a9607e724140" containerID="3944e9b2edcebbb56dd4f55a2df66bcaf8ddee82aceb6c2bdb208b8306b05d59" exitCode=0 Nov 26 17:41:58 crc kubenswrapper[5010]: I1126 17:41:58.011568 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" event={"ID":"ed349151-ee95-4152-bff2-a9607e724140","Type":"ContainerDied","Data":"3944e9b2edcebbb56dd4f55a2df66bcaf8ddee82aceb6c2bdb208b8306b05d59"} Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.456633 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584077 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l79lh\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-kube-api-access-l79lh\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584163 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-libvirt-default-certs-0\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584194 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-dhcp-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584270 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ssh-key\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584318 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-inventory\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584354 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-telemetry-default-certs-0\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584386 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-telemetry-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584427 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-sriov-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584476 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-ovn-default-certs-0\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584506 
5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-bootstrap-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584555 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-nova-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584634 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-neutron-metadata-default-certs-0\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584674 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-libvirt-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584734 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-metadata-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.584782 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ovn-combined-ca-bundle\") pod \"ed349151-ee95-4152-bff2-a9607e724140\" (UID: \"ed349151-ee95-4152-bff2-a9607e724140\") " Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.590377 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.590398 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-ovn-default-certs-0") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "openstack-cell1-ovn-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.590509 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.593298 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.594178 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.599118 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.599173 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.599397 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-telemetry-default-certs-0") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "openstack-cell1-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.599454 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-neutron-metadata-default-certs-0") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "openstack-cell1-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.599665 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-cell1-libvirt-default-certs-0") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "openstack-cell1-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.600259 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.600555 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.601371 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-kube-api-access-l79lh" (OuterVolumeSpecName: "kube-api-access-l79lh") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "kube-api-access-l79lh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.621756 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-inventory" (OuterVolumeSpecName: "inventory") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.623704 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ed349151-ee95-4152-bff2-a9607e724140" (UID: "ed349151-ee95-4152-bff2-a9607e724140"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688811 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688861 5010 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688880 5010 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688899 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688917 5010 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688934 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688950 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688966 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l79lh\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-kube-api-access-l79lh\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.688982 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.689001 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.689017 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.689034 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-inventory\") on node \"crc\" DevicePath 
\"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.689051 5010 reconciler_common.go:293] "Volume detached for volume \"openstack-cell1-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ed349151-ee95-4152-bff2-a9607e724140-openstack-cell1-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.689070 5010 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:41:59 crc kubenswrapper[5010]: I1126 17:41:59.689087 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed349151-ee95-4152-bff2-a9607e724140-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.047539 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" event={"ID":"ed349151-ee95-4152-bff2-a9607e724140","Type":"ContainerDied","Data":"917a08e048b183b4782495ee62cb3a96ed9b96962a0f861ee639d60588e63231"} Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.047595 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="917a08e048b183b4782495ee62cb3a96ed9b96962a0f861ee639d60588e63231" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.047702 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-openstack-openstack-cell1-jcr48" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.163361 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-openstack-openstack-cell1-slxr9"] Nov 26 17:42:00 crc kubenswrapper[5010]: E1126 17:42:00.164008 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed349151-ee95-4152-bff2-a9607e724140" containerName="install-certs-openstack-openstack-cell1" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.164033 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed349151-ee95-4152-bff2-a9607e724140" containerName="install-certs-openstack-openstack-cell1" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.164331 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed349151-ee95-4152-bff2-a9607e724140" containerName="install-certs-openstack-openstack-cell1" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.165386 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.171399 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.171430 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.171541 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.171584 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.175536 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.179121 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-slxr9"] Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.303880 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-inventory\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.304202 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.304320 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ssh-key\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.304377 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhrfx\" (UniqueName: \"kubernetes.io/projected/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-kube-api-access-fhrfx\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.304453 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.406500 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ssh-key\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: 
\"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.406553 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhrfx\" (UniqueName: \"kubernetes.io/projected/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-kube-api-access-fhrfx\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.406647 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.406762 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-inventory\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.406787 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.407617 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovncontroller-config-0\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.412028 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovn-combined-ca-bundle\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.413284 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ssh-key\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.420511 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-inventory\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.425599 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhrfx\" (UniqueName: 
\"kubernetes.io/projected/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-kube-api-access-fhrfx\") pod \"ovn-openstack-openstack-cell1-slxr9\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:00 crc kubenswrapper[5010]: I1126 17:42:00.485381 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:42:01 crc kubenswrapper[5010]: I1126 17:42:01.042439 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-openstack-openstack-cell1-slxr9"] Nov 26 17:42:01 crc kubenswrapper[5010]: I1126 17:42:01.057129 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-slxr9" event={"ID":"e8cd0354-9014-4ae0-b985-f3c0e9e4d456","Type":"ContainerStarted","Data":"c8e4ad852b5de7a6fb626831c33191c7193e39f3b6bb3fd6e6678e6036e923c9"} Nov 26 17:42:02 crc kubenswrapper[5010]: I1126 17:42:02.073604 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-slxr9" event={"ID":"e8cd0354-9014-4ae0-b985-f3c0e9e4d456","Type":"ContainerStarted","Data":"2d79cb5a579b69b98a235451277842fea164dcafc87dc5bdc0d322e4f0355c06"} Nov 26 17:42:02 crc kubenswrapper[5010]: I1126 17:42:02.094415 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-openstack-openstack-cell1-slxr9" podStartSLOduration=1.366966324 podStartE2EDuration="2.094393585s" podCreationTimestamp="2025-11-26 17:42:00 +0000 UTC" firstStartedPulling="2025-11-26 17:42:01.045615007 +0000 UTC m=+8141.836332155" lastFinishedPulling="2025-11-26 17:42:01.773042258 +0000 UTC m=+8142.563759416" observedRunningTime="2025-11-26 17:42:02.089428952 +0000 UTC m=+8142.880146140" watchObservedRunningTime="2025-11-26 17:42:02.094393585 +0000 UTC m=+8142.885110733" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.151255 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bzpwv"] Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.154141 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.179652 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzpwv"] Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.258736 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-catalog-content\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.259087 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-utilities\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.259243 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrgfh\" (UniqueName: \"kubernetes.io/projected/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-kube-api-access-zrgfh\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.360850 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-catalog-content\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.360995 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-utilities\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.361035 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrgfh\" (UniqueName: \"kubernetes.io/projected/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-kube-api-access-zrgfh\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.361289 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-catalog-content\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.361384 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-utilities\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.388736 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zrgfh\" (UniqueName: \"kubernetes.io/projected/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-kube-api-access-zrgfh\") pod \"redhat-marketplace-bzpwv\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:38 crc kubenswrapper[5010]: I1126 17:42:38.476828 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:39 crc kubenswrapper[5010]: I1126 17:42:39.008157 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzpwv"] Nov 26 17:42:39 crc kubenswrapper[5010]: W1126 17:42:39.019687 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf574a197_ba27_4c29_8f84_e4e13bf4f0ad.slice/crio-e8cdbc25cbdb8162fe378947e5b2312098320407dcce5484a935596108afcdc0 WatchSource:0}: Error finding container e8cdbc25cbdb8162fe378947e5b2312098320407dcce5484a935596108afcdc0: Status 404 returned error can't find the container with id e8cdbc25cbdb8162fe378947e5b2312098320407dcce5484a935596108afcdc0 Nov 26 17:42:39 crc kubenswrapper[5010]: I1126 17:42:39.483739 5010 generic.go:334] "Generic (PLEG): container finished" podID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerID="69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06" exitCode=0 Nov 26 17:42:39 crc kubenswrapper[5010]: I1126 17:42:39.483784 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerDied","Data":"69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06"} Nov 26 17:42:39 crc kubenswrapper[5010]: I1126 17:42:39.483824 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerStarted","Data":"e8cdbc25cbdb8162fe378947e5b2312098320407dcce5484a935596108afcdc0"} Nov 26 17:42:45 crc kubenswrapper[5010]: I1126 17:42:45.552741 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerStarted","Data":"1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2"} Nov 26 17:42:46 crc kubenswrapper[5010]: I1126 17:42:46.588580 5010 generic.go:334] "Generic (PLEG): container finished" podID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerID="1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2" exitCode=0 Nov 26 17:42:46 crc kubenswrapper[5010]: I1126 17:42:46.588692 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerDied","Data":"1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2"} Nov 26 17:42:47 crc kubenswrapper[5010]: I1126 17:42:47.607685 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerStarted","Data":"45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57"} Nov 26 17:42:47 crc kubenswrapper[5010]: I1126 17:42:47.644868 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bzpwv" podStartSLOduration=2.097178144 
podStartE2EDuration="9.644846124s" podCreationTimestamp="2025-11-26 17:42:38 +0000 UTC" firstStartedPulling="2025-11-26 17:42:39.485689345 +0000 UTC m=+8180.276406493" lastFinishedPulling="2025-11-26 17:42:47.033357315 +0000 UTC m=+8187.824074473" observedRunningTime="2025-11-26 17:42:47.633168594 +0000 UTC m=+8188.423885752" watchObservedRunningTime="2025-11-26 17:42:47.644846124 +0000 UTC m=+8188.435563272" Nov 26 17:42:48 crc kubenswrapper[5010]: I1126 17:42:48.478041 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:48 crc kubenswrapper[5010]: I1126 17:42:48.478396 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:48 crc kubenswrapper[5010]: I1126 17:42:48.529337 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:58 crc kubenswrapper[5010]: I1126 17:42:58.528228 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:58 crc kubenswrapper[5010]: I1126 17:42:58.585957 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzpwv"] Nov 26 17:42:58 crc kubenswrapper[5010]: I1126 17:42:58.756384 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bzpwv" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="registry-server" containerID="cri-o://45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57" gracePeriod=2 Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.303315 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.342191 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-utilities\") pod \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.342546 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-catalog-content\") pod \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.342776 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrgfh\" (UniqueName: \"kubernetes.io/projected/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-kube-api-access-zrgfh\") pod \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\" (UID: \"f574a197-ba27-4c29-8f84-e4e13bf4f0ad\") " Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.343239 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-utilities" (OuterVolumeSpecName: "utilities") pod "f574a197-ba27-4c29-8f84-e4e13bf4f0ad" (UID: "f574a197-ba27-4c29-8f84-e4e13bf4f0ad"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.343525 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.351379 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-kube-api-access-zrgfh" (OuterVolumeSpecName: "kube-api-access-zrgfh") pod "f574a197-ba27-4c29-8f84-e4e13bf4f0ad" (UID: "f574a197-ba27-4c29-8f84-e4e13bf4f0ad"). InnerVolumeSpecName "kube-api-access-zrgfh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.366338 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f574a197-ba27-4c29-8f84-e4e13bf4f0ad" (UID: "f574a197-ba27-4c29-8f84-e4e13bf4f0ad"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.445660 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.445691 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrgfh\" (UniqueName: \"kubernetes.io/projected/f574a197-ba27-4c29-8f84-e4e13bf4f0ad-kube-api-access-zrgfh\") on node \"crc\" DevicePath \"\"" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.771018 5010 generic.go:334] "Generic (PLEG): container finished" podID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerID="45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57" exitCode=0 Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.771885 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerDied","Data":"45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57"} Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.771955 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzpwv" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.771981 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzpwv" event={"ID":"f574a197-ba27-4c29-8f84-e4e13bf4f0ad","Type":"ContainerDied","Data":"e8cdbc25cbdb8162fe378947e5b2312098320407dcce5484a935596108afcdc0"} Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.772021 5010 scope.go:117] "RemoveContainer" containerID="45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.807335 5010 scope.go:117] "RemoveContainer" containerID="1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.816143 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzpwv"] Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.830230 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzpwv"] Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.855900 5010 scope.go:117] "RemoveContainer" containerID="69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.915929 5010 scope.go:117] "RemoveContainer" containerID="45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57" Nov 26 17:42:59 crc kubenswrapper[5010]: E1126 17:42:59.918404 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57\": container with ID starting with 45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57 not found: ID does not exist" containerID="45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.918451 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57"} err="failed to get container status \"45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57\": rpc error: code = NotFound desc = could not find container \"45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57\": container with ID starting with 45ac1854755573a6ef3d51d60497eea6524f42e57ea9cdebcdbcbb454a7dac57 not found: ID does not exist" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.918482 5010 scope.go:117] "RemoveContainer" containerID="1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2" Nov 26 17:42:59 crc kubenswrapper[5010]: E1126 17:42:59.922343 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2\": container with ID starting with 1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2 not found: ID does not exist" containerID="1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.922434 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2"} err="failed to get container status \"1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2\": rpc error: code = NotFound desc = could not find 
container \"1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2\": container with ID starting with 1afa47e4c1ab9a4d077942895311dd8691e215c912e4045ace17162992838ae2 not found: ID does not exist" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.922471 5010 scope.go:117] "RemoveContainer" containerID="69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06" Nov 26 17:42:59 crc kubenswrapper[5010]: E1126 17:42:59.926230 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06\": container with ID starting with 69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06 not found: ID does not exist" containerID="69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.926279 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06"} err="failed to get container status \"69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06\": rpc error: code = NotFound desc = could not find container \"69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06\": container with ID starting with 69384ae122a96e52946e886fb178dd754f1c6566638c8de7854844ea6348cb06 not found: ID does not exist" Nov 26 17:42:59 crc kubenswrapper[5010]: I1126 17:42:59.947064 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" path="/var/lib/kubelet/pods/f574a197-ba27-4c29-8f84-e4e13bf4f0ad/volumes" Nov 26 17:43:11 crc kubenswrapper[5010]: I1126 17:43:11.423065 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:43:11 crc kubenswrapper[5010]: I1126 17:43:11.423679 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:43:11 crc kubenswrapper[5010]: I1126 17:43:11.930631 5010 generic.go:334] "Generic (PLEG): container finished" podID="e8cd0354-9014-4ae0-b985-f3c0e9e4d456" containerID="2d79cb5a579b69b98a235451277842fea164dcafc87dc5bdc0d322e4f0355c06" exitCode=0 Nov 26 17:43:11 crc kubenswrapper[5010]: I1126 17:43:11.930760 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-slxr9" event={"ID":"e8cd0354-9014-4ae0-b985-f3c0e9e4d456","Type":"ContainerDied","Data":"2d79cb5a579b69b98a235451277842fea164dcafc87dc5bdc0d322e4f0355c06"} Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.473002 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.586999 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fhrfx\" (UniqueName: \"kubernetes.io/projected/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-kube-api-access-fhrfx\") pod \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.587072 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovncontroller-config-0\") pod \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.587109 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ssh-key\") pod \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.587275 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-inventory\") pod \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.587384 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovn-combined-ca-bundle\") pod \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\" (UID: \"e8cd0354-9014-4ae0-b985-f3c0e9e4d456\") " Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.595970 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "e8cd0354-9014-4ae0-b985-f3c0e9e4d456" (UID: "e8cd0354-9014-4ae0-b985-f3c0e9e4d456"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.596084 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-kube-api-access-fhrfx" (OuterVolumeSpecName: "kube-api-access-fhrfx") pod "e8cd0354-9014-4ae0-b985-f3c0e9e4d456" (UID: "e8cd0354-9014-4ae0-b985-f3c0e9e4d456"). InnerVolumeSpecName "kube-api-access-fhrfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.628119 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "e8cd0354-9014-4ae0-b985-f3c0e9e4d456" (UID: "e8cd0354-9014-4ae0-b985-f3c0e9e4d456"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.631151 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e8cd0354-9014-4ae0-b985-f3c0e9e4d456" (UID: "e8cd0354-9014-4ae0-b985-f3c0e9e4d456"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.637201 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-inventory" (OuterVolumeSpecName: "inventory") pod "e8cd0354-9014-4ae0-b985-f3c0e9e4d456" (UID: "e8cd0354-9014-4ae0-b985-f3c0e9e4d456"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.689263 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.689471 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.689553 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fhrfx\" (UniqueName: \"kubernetes.io/projected/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-kube-api-access-fhrfx\") on node \"crc\" DevicePath \"\"" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.689633 5010 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.689703 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e8cd0354-9014-4ae0-b985-f3c0e9e4d456-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.960101 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-openstack-openstack-cell1-slxr9" event={"ID":"e8cd0354-9014-4ae0-b985-f3c0e9e4d456","Type":"ContainerDied","Data":"c8e4ad852b5de7a6fb626831c33191c7193e39f3b6bb3fd6e6678e6036e923c9"} Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.960172 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8e4ad852b5de7a6fb626831c33191c7193e39f3b6bb3fd6e6678e6036e923c9" Nov 26 17:43:13 crc kubenswrapper[5010]: I1126 17:43:13.960277 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-openstack-openstack-cell1-slxr9" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.092860 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-djdb8"] Nov 26 17:43:14 crc kubenswrapper[5010]: E1126 17:43:14.093263 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="registry-server" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.093278 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="registry-server" Nov 26 17:43:14 crc kubenswrapper[5010]: E1126 17:43:14.093309 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="extract-utilities" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.093316 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="extract-utilities" Nov 26 17:43:14 crc kubenswrapper[5010]: E1126 17:43:14.093339 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8cd0354-9014-4ae0-b985-f3c0e9e4d456" containerName="ovn-openstack-openstack-cell1" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.093346 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8cd0354-9014-4ae0-b985-f3c0e9e4d456" containerName="ovn-openstack-openstack-cell1" Nov 26 17:43:14 crc kubenswrapper[5010]: E1126 17:43:14.093365 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="extract-content" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.093371 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="extract-content" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.093563 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8cd0354-9014-4ae0-b985-f3c0e9e4d456" containerName="ovn-openstack-openstack-cell1" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.093579 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f574a197-ba27-4c29-8f84-e4e13bf4f0ad" containerName="registry-server" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.094385 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.097147 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.097362 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.097469 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.099758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.099839 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.099893 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.100001 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.100111 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxzdt\" (UniqueName: \"kubernetes.io/projected/5d7160f7-44f5-4094-a05e-692f659806bc-kube-api-access-xxzdt\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.100167 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.101193 5010 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"dataplane-adoption-secret" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.101814 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.101951 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.115837 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-djdb8"] Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.202861 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.202956 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxzdt\" (UniqueName: \"kubernetes.io/projected/5d7160f7-44f5-4094-a05e-692f659806bc-kube-api-access-xxzdt\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.202995 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.203025 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.203083 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.203121 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.210890 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: 
\"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.213399 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.217473 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-inventory\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.219136 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-ssh-key\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.227371 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-nova-metadata-neutron-config-0\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.235451 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxzdt\" (UniqueName: \"kubernetes.io/projected/5d7160f7-44f5-4094-a05e-692f659806bc-kube-api-access-xxzdt\") pod \"neutron-metadata-openstack-openstack-cell1-djdb8\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.415000 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:43:14 crc kubenswrapper[5010]: I1126 17:43:14.993302 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-openstack-openstack-cell1-djdb8"] Nov 26 17:43:15 crc kubenswrapper[5010]: I1126 17:43:15.982950 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" event={"ID":"5d7160f7-44f5-4094-a05e-692f659806bc","Type":"ContainerStarted","Data":"1328e6e531b52cf73c247fbe887319a7e1d63344c25e23d942f681018ad826e1"} Nov 26 17:43:15 crc kubenswrapper[5010]: I1126 17:43:15.983590 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" event={"ID":"5d7160f7-44f5-4094-a05e-692f659806bc","Type":"ContainerStarted","Data":"4620369fdbddf187587134c78da8808b348476b46180f60078dd028ba365baf1"} Nov 26 17:43:16 crc kubenswrapper[5010]: I1126 17:43:16.012567 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" podStartSLOduration=1.436740946 podStartE2EDuration="2.012544968s" podCreationTimestamp="2025-11-26 17:43:14 +0000 UTC" firstStartedPulling="2025-11-26 17:43:15.0012146 +0000 UTC m=+8215.791931768" lastFinishedPulling="2025-11-26 17:43:15.577018602 +0000 UTC m=+8216.367735790" observedRunningTime="2025-11-26 17:43:15.998421187 +0000 UTC m=+8216.789138355" watchObservedRunningTime="2025-11-26 17:43:16.012544968 +0000 UTC m=+8216.803262126" Nov 26 17:43:41 crc kubenswrapper[5010]: I1126 17:43:41.422753 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:43:41 crc kubenswrapper[5010]: I1126 17:43:41.423221 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.423010 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.423447 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.423495 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.424380 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"1e137fb0baffea03d92cc4f06527eedfbf4028b857c4698a78480b1bc12370d8"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.424449 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://1e137fb0baffea03d92cc4f06527eedfbf4028b857c4698a78480b1bc12370d8" gracePeriod=600 Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.621153 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="1e137fb0baffea03d92cc4f06527eedfbf4028b857c4698a78480b1bc12370d8" exitCode=0 Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.621257 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"1e137fb0baffea03d92cc4f06527eedfbf4028b857c4698a78480b1bc12370d8"} Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.621321 5010 scope.go:117] "RemoveContainer" containerID="4c859a8a6b3cef3ec71f83bceb023ffdb8732af0a3e6d05f68d8d66dd0ec45ec" Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.625366 5010 generic.go:334] "Generic (PLEG): container finished" podID="5d7160f7-44f5-4094-a05e-692f659806bc" containerID="1328e6e531b52cf73c247fbe887319a7e1d63344c25e23d942f681018ad826e1" exitCode=0 Nov 26 17:44:11 crc kubenswrapper[5010]: I1126 17:44:11.625401 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" event={"ID":"5d7160f7-44f5-4094-a05e-692f659806bc","Type":"ContainerDied","Data":"1328e6e531b52cf73c247fbe887319a7e1d63344c25e23d942f681018ad826e1"} Nov 26 17:44:12 crc kubenswrapper[5010]: I1126 17:44:12.642965 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f"} Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.093505 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.193019 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-metadata-combined-ca-bundle\") pod \"5d7160f7-44f5-4094-a05e-692f659806bc\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.193100 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-inventory\") pod \"5d7160f7-44f5-4094-a05e-692f659806bc\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.193202 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxzdt\" (UniqueName: \"kubernetes.io/projected/5d7160f7-44f5-4094-a05e-692f659806bc-kube-api-access-xxzdt\") pod \"5d7160f7-44f5-4094-a05e-692f659806bc\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.193247 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-ssh-key\") pod \"5d7160f7-44f5-4094-a05e-692f659806bc\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.193278 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-ovn-metadata-agent-neutron-config-0\") pod \"5d7160f7-44f5-4094-a05e-692f659806bc\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.193450 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-nova-metadata-neutron-config-0\") pod \"5d7160f7-44f5-4094-a05e-692f659806bc\" (UID: \"5d7160f7-44f5-4094-a05e-692f659806bc\") " Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.198539 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d7160f7-44f5-4094-a05e-692f659806bc-kube-api-access-xxzdt" (OuterVolumeSpecName: "kube-api-access-xxzdt") pod "5d7160f7-44f5-4094-a05e-692f659806bc" (UID: "5d7160f7-44f5-4094-a05e-692f659806bc"). InnerVolumeSpecName "kube-api-access-xxzdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.198563 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "5d7160f7-44f5-4094-a05e-692f659806bc" (UID: "5d7160f7-44f5-4094-a05e-692f659806bc"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.225738 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-inventory" (OuterVolumeSpecName: "inventory") pod "5d7160f7-44f5-4094-a05e-692f659806bc" (UID: "5d7160f7-44f5-4094-a05e-692f659806bc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.229528 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "5d7160f7-44f5-4094-a05e-692f659806bc" (UID: "5d7160f7-44f5-4094-a05e-692f659806bc"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.230110 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5d7160f7-44f5-4094-a05e-692f659806bc" (UID: "5d7160f7-44f5-4094-a05e-692f659806bc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.237222 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "5d7160f7-44f5-4094-a05e-692f659806bc" (UID: "5d7160f7-44f5-4094-a05e-692f659806bc"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.295560 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.295600 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.295613 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.295624 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxzdt\" (UniqueName: \"kubernetes.io/projected/5d7160f7-44f5-4094-a05e-692f659806bc-kube-api-access-xxzdt\") on node \"crc\" DevicePath \"\"" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.295632 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.295642 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/5d7160f7-44f5-4094-a05e-692f659806bc-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.656361 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.656364 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-openstack-openstack-cell1-djdb8" event={"ID":"5d7160f7-44f5-4094-a05e-692f659806bc","Type":"ContainerDied","Data":"4620369fdbddf187587134c78da8808b348476b46180f60078dd028ba365baf1"} Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.657152 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4620369fdbddf187587134c78da8808b348476b46180f60078dd028ba365baf1" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.763366 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-gf4kg"] Nov 26 17:44:13 crc kubenswrapper[5010]: E1126 17:44:13.763946 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d7160f7-44f5-4094-a05e-692f659806bc" containerName="neutron-metadata-openstack-openstack-cell1" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.763971 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d7160f7-44f5-4094-a05e-692f659806bc" containerName="neutron-metadata-openstack-openstack-cell1" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.764241 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d7160f7-44f5-4094-a05e-692f659806bc" containerName="neutron-metadata-openstack-openstack-cell1" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.765202 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.766925 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.767674 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.767909 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.768102 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.770665 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.805565 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-gf4kg"] Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.805664 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.805739 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.805831 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-ssh-key\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.805903 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-inventory\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.805942 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcrhp\" (UniqueName: \"kubernetes.io/projected/42336123-89c2-4bfd-8772-ce5dca1dd4a5-kube-api-access-mcrhp\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.907578 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-ssh-key\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" 
(UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.907665 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-inventory\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.907724 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcrhp\" (UniqueName: \"kubernetes.io/projected/42336123-89c2-4bfd-8772-ce5dca1dd4a5-kube-api-access-mcrhp\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.907836 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.907876 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.913070 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-inventory\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.914001 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-secret-0\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.914277 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-ssh-key\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.914759 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-combined-ca-bundle\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:13 crc kubenswrapper[5010]: I1126 17:44:13.923812 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcrhp\" (UniqueName: 
\"kubernetes.io/projected/42336123-89c2-4bfd-8772-ce5dca1dd4a5-kube-api-access-mcrhp\") pod \"libvirt-openstack-openstack-cell1-gf4kg\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:14 crc kubenswrapper[5010]: I1126 17:44:14.094358 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:44:14 crc kubenswrapper[5010]: I1126 17:44:14.874681 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-openstack-openstack-cell1-gf4kg"] Nov 26 17:44:14 crc kubenswrapper[5010]: W1126 17:44:14.913508 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod42336123_89c2_4bfd_8772_ce5dca1dd4a5.slice/crio-a5eef292a766c3149f996f6be130f5150c85ed44f7f66394e4d59e0fc23c9a28 WatchSource:0}: Error finding container a5eef292a766c3149f996f6be130f5150c85ed44f7f66394e4d59e0fc23c9a28: Status 404 returned error can't find the container with id a5eef292a766c3149f996f6be130f5150c85ed44f7f66394e4d59e0fc23c9a28 Nov 26 17:44:15 crc kubenswrapper[5010]: I1126 17:44:15.700409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" event={"ID":"42336123-89c2-4bfd-8772-ce5dca1dd4a5","Type":"ContainerStarted","Data":"a5eef292a766c3149f996f6be130f5150c85ed44f7f66394e4d59e0fc23c9a28"} Nov 26 17:44:16 crc kubenswrapper[5010]: I1126 17:44:16.718147 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" event={"ID":"42336123-89c2-4bfd-8772-ce5dca1dd4a5","Type":"ContainerStarted","Data":"aee564039dc37e4f93c1fb058f1d0070cce69f4320cc44ad17303180b5c07c2e"} Nov 26 17:44:16 crc kubenswrapper[5010]: I1126 17:44:16.736585 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" podStartSLOduration=2.946836409 podStartE2EDuration="3.736561519s" podCreationTimestamp="2025-11-26 17:44:13 +0000 UTC" firstStartedPulling="2025-11-26 17:44:14.917207036 +0000 UTC m=+8275.707924184" lastFinishedPulling="2025-11-26 17:44:15.706932146 +0000 UTC m=+8276.497649294" observedRunningTime="2025-11-26 17:44:16.734780575 +0000 UTC m=+8277.525497733" watchObservedRunningTime="2025-11-26 17:44:16.736561519 +0000 UTC m=+8277.527278677" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.186257 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62"] Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.189145 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.193356 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.193436 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.202679 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62"] Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.354294 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/30949a26-3ac2-4c47-ab95-0b1d198561c7-secret-volume\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.354545 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l6lh\" (UniqueName: \"kubernetes.io/projected/30949a26-3ac2-4c47-ab95-0b1d198561c7-kube-api-access-7l6lh\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.354894 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/30949a26-3ac2-4c47-ab95-0b1d198561c7-config-volume\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.457364 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/30949a26-3ac2-4c47-ab95-0b1d198561c7-secret-volume\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.457490 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l6lh\" (UniqueName: \"kubernetes.io/projected/30949a26-3ac2-4c47-ab95-0b1d198561c7-kube-api-access-7l6lh\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.457567 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/30949a26-3ac2-4c47-ab95-0b1d198561c7-config-volume\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.458732 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/30949a26-3ac2-4c47-ab95-0b1d198561c7-config-volume\") pod 
\"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.478411 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/30949a26-3ac2-4c47-ab95-0b1d198561c7-secret-volume\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.480138 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l6lh\" (UniqueName: \"kubernetes.io/projected/30949a26-3ac2-4c47-ab95-0b1d198561c7-kube-api-access-7l6lh\") pod \"collect-profiles-29402985-z5d62\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:00 crc kubenswrapper[5010]: I1126 17:45:00.516472 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:01 crc kubenswrapper[5010]: I1126 17:45:01.027733 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62"] Nov 26 17:45:01 crc kubenswrapper[5010]: W1126 17:45:01.034558 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod30949a26_3ac2_4c47_ab95_0b1d198561c7.slice/crio-b03a49d74270c087c1d63e5c61f8ed1f6324f737d6bddd6baf88e287ca507723 WatchSource:0}: Error finding container b03a49d74270c087c1d63e5c61f8ed1f6324f737d6bddd6baf88e287ca507723: Status 404 returned error can't find the container with id b03a49d74270c087c1d63e5c61f8ed1f6324f737d6bddd6baf88e287ca507723 Nov 26 17:45:02 crc kubenswrapper[5010]: I1126 17:45:02.036005 5010 generic.go:334] "Generic (PLEG): container finished" podID="30949a26-3ac2-4c47-ab95-0b1d198561c7" containerID="6274e4c0c4b07b891584ce4c1b0d9666f08d6503cba62cc395ad7d024f160a74" exitCode=0 Nov 26 17:45:02 crc kubenswrapper[5010]: I1126 17:45:02.036104 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" event={"ID":"30949a26-3ac2-4c47-ab95-0b1d198561c7","Type":"ContainerDied","Data":"6274e4c0c4b07b891584ce4c1b0d9666f08d6503cba62cc395ad7d024f160a74"} Nov 26 17:45:02 crc kubenswrapper[5010]: I1126 17:45:02.036333 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" event={"ID":"30949a26-3ac2-4c47-ab95-0b1d198561c7","Type":"ContainerStarted","Data":"b03a49d74270c087c1d63e5c61f8ed1f6324f737d6bddd6baf88e287ca507723"} Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.484140 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.523913 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/30949a26-3ac2-4c47-ab95-0b1d198561c7-secret-volume\") pod \"30949a26-3ac2-4c47-ab95-0b1d198561c7\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.531004 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/30949a26-3ac2-4c47-ab95-0b1d198561c7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "30949a26-3ac2-4c47-ab95-0b1d198561c7" (UID: "30949a26-3ac2-4c47-ab95-0b1d198561c7"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.625096 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/30949a26-3ac2-4c47-ab95-0b1d198561c7-config-volume\") pod \"30949a26-3ac2-4c47-ab95-0b1d198561c7\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.625167 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l6lh\" (UniqueName: \"kubernetes.io/projected/30949a26-3ac2-4c47-ab95-0b1d198561c7-kube-api-access-7l6lh\") pod \"30949a26-3ac2-4c47-ab95-0b1d198561c7\" (UID: \"30949a26-3ac2-4c47-ab95-0b1d198561c7\") " Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.625638 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/30949a26-3ac2-4c47-ab95-0b1d198561c7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.625809 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/30949a26-3ac2-4c47-ab95-0b1d198561c7-config-volume" (OuterVolumeSpecName: "config-volume") pod "30949a26-3ac2-4c47-ab95-0b1d198561c7" (UID: "30949a26-3ac2-4c47-ab95-0b1d198561c7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.630106 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/30949a26-3ac2-4c47-ab95-0b1d198561c7-kube-api-access-7l6lh" (OuterVolumeSpecName: "kube-api-access-7l6lh") pod "30949a26-3ac2-4c47-ab95-0b1d198561c7" (UID: "30949a26-3ac2-4c47-ab95-0b1d198561c7"). InnerVolumeSpecName "kube-api-access-7l6lh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.727324 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/30949a26-3ac2-4c47-ab95-0b1d198561c7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 17:45:03 crc kubenswrapper[5010]: I1126 17:45:03.727356 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l6lh\" (UniqueName: \"kubernetes.io/projected/30949a26-3ac2-4c47-ab95-0b1d198561c7-kube-api-access-7l6lh\") on node \"crc\" DevicePath \"\"" Nov 26 17:45:04 crc kubenswrapper[5010]: I1126 17:45:04.057618 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" event={"ID":"30949a26-3ac2-4c47-ab95-0b1d198561c7","Type":"ContainerDied","Data":"b03a49d74270c087c1d63e5c61f8ed1f6324f737d6bddd6baf88e287ca507723"} Nov 26 17:45:04 crc kubenswrapper[5010]: I1126 17:45:04.057659 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b03a49d74270c087c1d63e5c61f8ed1f6324f737d6bddd6baf88e287ca507723" Nov 26 17:45:04 crc kubenswrapper[5010]: I1126 17:45:04.057681 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62" Nov 26 17:45:04 crc kubenswrapper[5010]: I1126 17:45:04.576703 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz"] Nov 26 17:45:04 crc kubenswrapper[5010]: I1126 17:45:04.590236 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402940-jhsqz"] Nov 26 17:45:05 crc kubenswrapper[5010]: I1126 17:45:05.903993 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1ac8e8a-6f69-486e-b618-a79402db39a6" path="/var/lib/kubelet/pods/d1ac8e8a-6f69-486e-b618-a79402db39a6/volumes" Nov 26 17:45:12 crc kubenswrapper[5010]: I1126 17:45:12.830508 5010 scope.go:117] "RemoveContainer" containerID="3d4f2a476d97e226c5a55dcc072405ad4d81ebc33abf34742124423da193463d" Nov 26 17:46:11 crc kubenswrapper[5010]: I1126 17:46:11.426314 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:46:11 crc kubenswrapper[5010]: I1126 17:46:11.426943 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:46:41 crc kubenswrapper[5010]: I1126 17:46:41.422994 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:46:41 crc kubenswrapper[5010]: I1126 17:46:41.423887 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.422763 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.423403 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.423468 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.424760 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.424862 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" gracePeriod=600 Nov 26 17:47:11 crc kubenswrapper[5010]: E1126 17:47:11.549955 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.719014 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" exitCode=0 Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.719091 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f"} Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.719141 5010 scope.go:117] "RemoveContainer" containerID="1e137fb0baffea03d92cc4f06527eedfbf4028b857c4698a78480b1bc12370d8" Nov 26 17:47:11 crc kubenswrapper[5010]: I1126 17:47:11.720565 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:47:11 crc kubenswrapper[5010]: E1126 17:47:11.721300 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:47:25 crc kubenswrapper[5010]: I1126 17:47:25.892939 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:47:25 crc kubenswrapper[5010]: E1126 17:47:25.893687 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:47:40 crc kubenswrapper[5010]: I1126 17:47:40.892151 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:47:40 crc kubenswrapper[5010]: E1126 17:47:40.893174 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:47:54 crc kubenswrapper[5010]: I1126 17:47:54.892409 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:47:54 crc kubenswrapper[5010]: E1126 17:47:54.894560 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:48:05 crc kubenswrapper[5010]: I1126 17:48:05.892379 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:48:05 crc kubenswrapper[5010]: E1126 17:48:05.893252 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:48:17 crc kubenswrapper[5010]: I1126 17:48:17.892378 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:48:17 crc kubenswrapper[5010]: E1126 17:48:17.893031 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:48:28 crc kubenswrapper[5010]: I1126 17:48:28.891739 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:48:28 crc kubenswrapper[5010]: E1126 17:48:28.892509 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:48:41 crc kubenswrapper[5010]: I1126 17:48:41.892978 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:48:41 crc kubenswrapper[5010]: E1126 17:48:41.894205 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:48:56 crc kubenswrapper[5010]: I1126 17:48:56.894932 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:48:56 crc kubenswrapper[5010]: E1126 17:48:56.903224 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:49:04 crc kubenswrapper[5010]: I1126 17:49:04.567062 5010 generic.go:334] "Generic (PLEG): container finished" podID="42336123-89c2-4bfd-8772-ce5dca1dd4a5" containerID="aee564039dc37e4f93c1fb058f1d0070cce69f4320cc44ad17303180b5c07c2e" exitCode=0 Nov 26 17:49:04 crc kubenswrapper[5010]: I1126 17:49:04.567180 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" event={"ID":"42336123-89c2-4bfd-8772-ce5dca1dd4a5","Type":"ContainerDied","Data":"aee564039dc37e4f93c1fb058f1d0070cce69f4320cc44ad17303180b5c07c2e"} Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.112032 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.235425 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-combined-ca-bundle\") pod \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.235484 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-ssh-key\") pod \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.235639 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcrhp\" (UniqueName: \"kubernetes.io/projected/42336123-89c2-4bfd-8772-ce5dca1dd4a5-kube-api-access-mcrhp\") pod \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.235664 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-inventory\") pod \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.235873 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-secret-0\") pod \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\" (UID: \"42336123-89c2-4bfd-8772-ce5dca1dd4a5\") " Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.241845 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "42336123-89c2-4bfd-8772-ce5dca1dd4a5" (UID: "42336123-89c2-4bfd-8772-ce5dca1dd4a5"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.242049 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42336123-89c2-4bfd-8772-ce5dca1dd4a5-kube-api-access-mcrhp" (OuterVolumeSpecName: "kube-api-access-mcrhp") pod "42336123-89c2-4bfd-8772-ce5dca1dd4a5" (UID: "42336123-89c2-4bfd-8772-ce5dca1dd4a5"). InnerVolumeSpecName "kube-api-access-mcrhp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.266052 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "42336123-89c2-4bfd-8772-ce5dca1dd4a5" (UID: "42336123-89c2-4bfd-8772-ce5dca1dd4a5"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.266541 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-inventory" (OuterVolumeSpecName: "inventory") pod "42336123-89c2-4bfd-8772-ce5dca1dd4a5" (UID: "42336123-89c2-4bfd-8772-ce5dca1dd4a5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.277881 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "42336123-89c2-4bfd-8772-ce5dca1dd4a5" (UID: "42336123-89c2-4bfd-8772-ce5dca1dd4a5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.338833 5010 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.339042 5010 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.339088 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.339105 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcrhp\" (UniqueName: \"kubernetes.io/projected/42336123-89c2-4bfd-8772-ce5dca1dd4a5-kube-api-access-mcrhp\") on node \"crc\" DevicePath \"\"" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.339115 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/42336123-89c2-4bfd-8772-ce5dca1dd4a5-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.594530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" event={"ID":"42336123-89c2-4bfd-8772-ce5dca1dd4a5","Type":"ContainerDied","Data":"a5eef292a766c3149f996f6be130f5150c85ed44f7f66394e4d59e0fc23c9a28"} Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.594876 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a5eef292a766c3149f996f6be130f5150c85ed44f7f66394e4d59e0fc23c9a28" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.594609 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-openstack-openstack-cell1-gf4kg" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.732781 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-6gf92"] Nov 26 17:49:06 crc kubenswrapper[5010]: E1126 17:49:06.733335 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42336123-89c2-4bfd-8772-ce5dca1dd4a5" containerName="libvirt-openstack-openstack-cell1" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.733356 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="42336123-89c2-4bfd-8772-ce5dca1dd4a5" containerName="libvirt-openstack-openstack-cell1" Nov 26 17:49:06 crc kubenswrapper[5010]: E1126 17:49:06.733380 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="30949a26-3ac2-4c47-ab95-0b1d198561c7" containerName="collect-profiles" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.733389 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="30949a26-3ac2-4c47-ab95-0b1d198561c7" containerName="collect-profiles" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.733608 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="42336123-89c2-4bfd-8772-ce5dca1dd4a5" containerName="libvirt-openstack-openstack-cell1" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.733639 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="30949a26-3ac2-4c47-ab95-0b1d198561c7" containerName="collect-profiles" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.734498 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.740735 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.741041 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.741261 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.741487 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.741768 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.742176 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.744658 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.750535 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-6gf92"] Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.854196 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-inventory\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc 
kubenswrapper[5010]: I1126 17:49:06.854424 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.854493 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.854529 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.854559 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.854729 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.855056 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.855150 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvxb2\" (UniqueName: \"kubernetes.io/projected/803b0121-2a6a-4ee8-b835-397db3b6bd43-kube-api-access-jvxb2\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.855204 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " 
pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.957808 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.957913 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.958069 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.958099 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.958146 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.958920 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.959017 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvxb2\" (UniqueName: \"kubernetes.io/projected/803b0121-2a6a-4ee8-b835-397db3b6bd43-kube-api-access-jvxb2\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.959089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc 
kubenswrapper[5010]: I1126 17:49:06.959190 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-inventory\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.960887 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cells-global-config-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.964920 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.968626 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.969762 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-ssh-key\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.972138 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-inventory\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.977232 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.982493 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvxb2\" (UniqueName: \"kubernetes.io/projected/803b0121-2a6a-4ee8-b835-397db3b6bd43-kube-api-access-jvxb2\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.985693 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:06 crc kubenswrapper[5010]: I1126 17:49:06.986390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-openstack-cell1-6gf92\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:07 crc kubenswrapper[5010]: I1126 17:49:07.063106 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:49:07 crc kubenswrapper[5010]: I1126 17:49:07.694345 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-openstack-cell1-6gf92"] Nov 26 17:49:07 crc kubenswrapper[5010]: I1126 17:49:07.719433 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:49:07 crc kubenswrapper[5010]: I1126 17:49:07.893955 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:49:07 crc kubenswrapper[5010]: E1126 17:49:07.894531 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:49:08 crc kubenswrapper[5010]: I1126 17:49:08.618727 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" event={"ID":"803b0121-2a6a-4ee8-b835-397db3b6bd43","Type":"ContainerStarted","Data":"ad01b0484e1add0c8f6bad6a894ee93c6280373ec79f4ef9116956b14307e409"} Nov 26 17:49:08 crc kubenswrapper[5010]: I1126 17:49:08.619072 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" event={"ID":"803b0121-2a6a-4ee8-b835-397db3b6bd43","Type":"ContainerStarted","Data":"69e386c22347bce1001d476b7e2e5d178fc0aa8b3fee273541458d439ff9a754"} Nov 26 17:49:22 crc kubenswrapper[5010]: I1126 17:49:22.893680 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:49:22 crc kubenswrapper[5010]: E1126 17:49:22.894836 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:49:37 crc kubenswrapper[5010]: I1126 17:49:37.892508 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:49:37 crc kubenswrapper[5010]: E1126 17:49:37.893566 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:49:52 crc kubenswrapper[5010]: I1126 17:49:52.892439 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:49:52 crc kubenswrapper[5010]: E1126 17:49:52.893777 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:50:03 crc kubenswrapper[5010]: I1126 17:50:03.892401 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:50:03 crc kubenswrapper[5010]: E1126 17:50:03.893186 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:50:14 crc kubenswrapper[5010]: I1126 17:50:14.892015 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:50:14 crc kubenswrapper[5010]: E1126 17:50:14.893021 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:50:28 crc kubenswrapper[5010]: I1126 17:50:28.892233 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:50:28 crc kubenswrapper[5010]: E1126 17:50:28.893599 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:50:43 crc kubenswrapper[5010]: I1126 17:50:43.893161 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:50:43 crc kubenswrapper[5010]: E1126 17:50:43.894532 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:50:57 crc kubenswrapper[5010]: I1126 17:50:57.892092 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:50:57 crc kubenswrapper[5010]: E1126 17:50:57.893027 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:51:08 crc kubenswrapper[5010]: I1126 17:51:08.891993 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:51:08 crc kubenswrapper[5010]: E1126 17:51:08.893558 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:51:20 crc kubenswrapper[5010]: I1126 17:51:20.892274 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:51:20 crc kubenswrapper[5010]: E1126 17:51:20.893122 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:51:34 crc kubenswrapper[5010]: I1126 17:51:34.892228 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:51:34 crc kubenswrapper[5010]: E1126 17:51:34.893373 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.649972 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" podStartSLOduration=155.078119815 podStartE2EDuration="2m35.649950098s" podCreationTimestamp="2025-11-26 17:49:06 +0000 UTC" firstStartedPulling="2025-11-26 17:49:07.719151036 +0000 UTC m=+8568.509868184" lastFinishedPulling="2025-11-26 17:49:08.290981289 +0000 UTC m=+8569.081698467" observedRunningTime="2025-11-26 17:49:08.639424471 +0000 UTC m=+8569.430141609" watchObservedRunningTime="2025-11-26 17:51:41.649950098 +0000 UTC m=+8722.440667256" 
Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.655323 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jksg7"] Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.658451 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.673575 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jksg7"] Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.806231 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcmnr\" (UniqueName: \"kubernetes.io/projected/10909ba7-328e-470a-afa4-c8b771726303-kube-api-access-mcmnr\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.806639 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-utilities\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.806804 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-catalog-content\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.908724 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcmnr\" (UniqueName: \"kubernetes.io/projected/10909ba7-328e-470a-afa4-c8b771726303-kube-api-access-mcmnr\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.908796 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-utilities\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.908905 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-catalog-content\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.909614 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-catalog-content\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.909686 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-utilities\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.934637 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcmnr\" (UniqueName: \"kubernetes.io/projected/10909ba7-328e-470a-afa4-c8b771726303-kube-api-access-mcmnr\") pod \"community-operators-jksg7\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:41 crc kubenswrapper[5010]: I1126 17:51:41.983872 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:42 crc kubenswrapper[5010]: I1126 17:51:42.574722 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jksg7"] Nov 26 17:51:43 crc kubenswrapper[5010]: I1126 17:51:43.557800 5010 generic.go:334] "Generic (PLEG): container finished" podID="10909ba7-328e-470a-afa4-c8b771726303" containerID="7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6" exitCode=0 Nov 26 17:51:43 crc kubenswrapper[5010]: I1126 17:51:43.557877 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerDied","Data":"7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6"} Nov 26 17:51:43 crc kubenswrapper[5010]: I1126 17:51:43.558421 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerStarted","Data":"47f847b1224998c8765de01fdb65bf59ce05f89318b90ea1972e9d41734d1e9b"} Nov 26 17:51:44 crc kubenswrapper[5010]: I1126 17:51:44.576358 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerStarted","Data":"85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2"} Nov 26 17:51:45 crc kubenswrapper[5010]: I1126 17:51:45.597576 5010 generic.go:334] "Generic (PLEG): container finished" podID="10909ba7-328e-470a-afa4-c8b771726303" containerID="85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2" exitCode=0 Nov 26 17:51:45 crc kubenswrapper[5010]: I1126 17:51:45.597751 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerDied","Data":"85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2"} Nov 26 17:51:45 crc kubenswrapper[5010]: I1126 17:51:45.891626 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:51:45 crc kubenswrapper[5010]: E1126 17:51:45.892011 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:51:46 crc kubenswrapper[5010]: I1126 17:51:46.612763 5010 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerStarted","Data":"dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6"} Nov 26 17:51:46 crc kubenswrapper[5010]: I1126 17:51:46.633157 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jksg7" podStartSLOduration=3.091153488 podStartE2EDuration="5.633138133s" podCreationTimestamp="2025-11-26 17:51:41 +0000 UTC" firstStartedPulling="2025-11-26 17:51:43.560641921 +0000 UTC m=+8724.351359079" lastFinishedPulling="2025-11-26 17:51:46.102626556 +0000 UTC m=+8726.893343724" observedRunningTime="2025-11-26 17:51:46.630421765 +0000 UTC m=+8727.421138923" watchObservedRunningTime="2025-11-26 17:51:46.633138133 +0000 UTC m=+8727.423855301" Nov 26 17:51:51 crc kubenswrapper[5010]: I1126 17:51:51.985110 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:51 crc kubenswrapper[5010]: I1126 17:51:51.985675 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:52 crc kubenswrapper[5010]: I1126 17:51:52.057459 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:52 crc kubenswrapper[5010]: I1126 17:51:52.752569 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:52 crc kubenswrapper[5010]: I1126 17:51:52.811358 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jksg7"] Nov 26 17:51:54 crc kubenswrapper[5010]: I1126 17:51:54.726141 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jksg7" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="registry-server" containerID="cri-o://dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6" gracePeriod=2 Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.233988 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.362160 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcmnr\" (UniqueName: \"kubernetes.io/projected/10909ba7-328e-470a-afa4-c8b771726303-kube-api-access-mcmnr\") pod \"10909ba7-328e-470a-afa4-c8b771726303\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.362412 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-catalog-content\") pod \"10909ba7-328e-470a-afa4-c8b771726303\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.362473 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-utilities\") pod \"10909ba7-328e-470a-afa4-c8b771726303\" (UID: \"10909ba7-328e-470a-afa4-c8b771726303\") " Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.364314 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-utilities" (OuterVolumeSpecName: "utilities") pod "10909ba7-328e-470a-afa4-c8b771726303" (UID: "10909ba7-328e-470a-afa4-c8b771726303"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.370631 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10909ba7-328e-470a-afa4-c8b771726303-kube-api-access-mcmnr" (OuterVolumeSpecName: "kube-api-access-mcmnr") pod "10909ba7-328e-470a-afa4-c8b771726303" (UID: "10909ba7-328e-470a-afa4-c8b771726303"). InnerVolumeSpecName "kube-api-access-mcmnr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.419904 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "10909ba7-328e-470a-afa4-c8b771726303" (UID: "10909ba7-328e-470a-afa4-c8b771726303"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.466149 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcmnr\" (UniqueName: \"kubernetes.io/projected/10909ba7-328e-470a-afa4-c8b771726303-kube-api-access-mcmnr\") on node \"crc\" DevicePath \"\"" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.466177 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.466187 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/10909ba7-328e-470a-afa4-c8b771726303-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.741450 5010 generic.go:334] "Generic (PLEG): container finished" podID="10909ba7-328e-470a-afa4-c8b771726303" containerID="dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6" exitCode=0 Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.741504 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerDied","Data":"dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6"} Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.741528 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jksg7" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.741549 5010 scope.go:117] "RemoveContainer" containerID="dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.741536 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jksg7" event={"ID":"10909ba7-328e-470a-afa4-c8b771726303","Type":"ContainerDied","Data":"47f847b1224998c8765de01fdb65bf59ce05f89318b90ea1972e9d41734d1e9b"} Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.788529 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jksg7"] Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.791901 5010 scope.go:117] "RemoveContainer" containerID="85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.807804 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jksg7"] Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.827905 5010 scope.go:117] "RemoveContainer" containerID="7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.869574 5010 scope.go:117] "RemoveContainer" containerID="dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6" Nov 26 17:51:55 crc kubenswrapper[5010]: E1126 17:51:55.870121 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6\": container with ID starting with dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6 not found: ID does not exist" containerID="dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.870168 
5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6"} err="failed to get container status \"dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6\": rpc error: code = NotFound desc = could not find container \"dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6\": container with ID starting with dbf37c5bafd3ebcd280cbc9b52dd2bba11fda50e7ebe19d43d6f79d366f6dab6 not found: ID does not exist" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.870206 5010 scope.go:117] "RemoveContainer" containerID="85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2" Nov 26 17:51:55 crc kubenswrapper[5010]: E1126 17:51:55.870803 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2\": container with ID starting with 85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2 not found: ID does not exist" containerID="85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.870833 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2"} err="failed to get container status \"85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2\": rpc error: code = NotFound desc = could not find container \"85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2\": container with ID starting with 85ed96e3a5ed1b632658c77ca0c19375c063a2289a43dea970d3de8db7f3a8f2 not found: ID does not exist" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.870847 5010 scope.go:117] "RemoveContainer" containerID="7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6" Nov 26 17:51:55 crc kubenswrapper[5010]: E1126 17:51:55.871280 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6\": container with ID starting with 7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6 not found: ID does not exist" containerID="7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.871352 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6"} err="failed to get container status \"7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6\": rpc error: code = NotFound desc = could not find container \"7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6\": container with ID starting with 7e6ca8ca8cd55322095b294bb398a915ce873a53d885c2189b0db8bb4b69c5c6 not found: ID does not exist" Nov 26 17:51:55 crc kubenswrapper[5010]: I1126 17:51:55.920276 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10909ba7-328e-470a-afa4-c8b771726303" path="/var/lib/kubelet/pods/10909ba7-328e-470a-afa4-c8b771726303/volumes" Nov 26 17:51:56 crc kubenswrapper[5010]: I1126 17:51:56.893852 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:51:56 crc kubenswrapper[5010]: E1126 17:51:56.894871 5010 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:52:08 crc kubenswrapper[5010]: I1126 17:52:08.892639 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:52:08 crc kubenswrapper[5010]: E1126 17:52:08.893900 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:52:20 crc kubenswrapper[5010]: I1126 17:52:20.892706 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:52:22 crc kubenswrapper[5010]: I1126 17:52:22.063697 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"516fdbcbc586d8c3f01cbfc3a0f286b0cd02e5668f8809602741ea746e0e3f69"} Nov 26 17:52:23 crc kubenswrapper[5010]: I1126 17:52:23.077530 5010 generic.go:334] "Generic (PLEG): container finished" podID="803b0121-2a6a-4ee8-b835-397db3b6bd43" containerID="ad01b0484e1add0c8f6bad6a894ee93c6280373ec79f4ef9116956b14307e409" exitCode=0 Nov 26 17:52:23 crc kubenswrapper[5010]: I1126 17:52:23.077862 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" event={"ID":"803b0121-2a6a-4ee8-b835-397db3b6bd43","Type":"ContainerDied","Data":"ad01b0484e1add0c8f6bad6a894ee93c6280373ec79f4ef9116956b14307e409"} Nov 26 17:52:24 crc kubenswrapper[5010]: I1126 17:52:24.702893 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="4456ea0d-01da-4a0a-b918-db686f0e23aa" containerName="galera" probeResult="failure" output="command timed out" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.069206 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.111430 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" event={"ID":"803b0121-2a6a-4ee8-b835-397db3b6bd43","Type":"ContainerDied","Data":"69e386c22347bce1001d476b7e2e5d178fc0aa8b3fee273541458d439ff9a754"} Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.111480 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69e386c22347bce1001d476b7e2e5d178fc0aa8b3fee273541458d439ff9a754" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.111592 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-openstack-cell1-6gf92" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.124749 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-0\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.124848 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cells-global-config-0\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.124896 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-combined-ca-bundle\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.124926 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-inventory\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.124967 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvxb2\" (UniqueName: \"kubernetes.io/projected/803b0121-2a6a-4ee8-b835-397db3b6bd43-kube-api-access-jvxb2\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.125069 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-0\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.125089 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-ssh-key\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.125211 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-1\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.125289 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-1\") pod \"803b0121-2a6a-4ee8-b835-397db3b6bd43\" (UID: \"803b0121-2a6a-4ee8-b835-397db3b6bd43\") " Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.138305 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.140156 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/803b0121-2a6a-4ee8-b835-397db3b6bd43-kube-api-access-jvxb2" (OuterVolumeSpecName: "kube-api-access-jvxb2") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "kube-api-access-jvxb2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.163700 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.167916 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.176367 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.182352 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.183060 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-inventory" (OuterVolumeSpecName: "inventory") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.183443 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.190270 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "803b0121-2a6a-4ee8-b835-397db3b6bd43" (UID: "803b0121-2a6a-4ee8-b835-397db3b6bd43"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227379 5010 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227405 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227414 5010 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227424 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227433 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227441 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227449 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227459 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/803b0121-2a6a-4ee8-b835-397db3b6bd43-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.227469 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvxb2\" (UniqueName: \"kubernetes.io/projected/803b0121-2a6a-4ee8-b835-397db3b6bd43-kube-api-access-jvxb2\") on node \"crc\" DevicePath \"\"" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.811270 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-5t8x4"] Nov 26 17:52:25 crc kubenswrapper[5010]: E1126 17:52:25.812177 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="extract-utilities" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.812197 5010 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="extract-utilities" Nov 26 17:52:25 crc kubenswrapper[5010]: E1126 17:52:25.812224 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="registry-server" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.812233 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="registry-server" Nov 26 17:52:25 crc kubenswrapper[5010]: E1126 17:52:25.812255 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="803b0121-2a6a-4ee8-b835-397db3b6bd43" containerName="nova-cell1-openstack-openstack-cell1" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.812265 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="803b0121-2a6a-4ee8-b835-397db3b6bd43" containerName="nova-cell1-openstack-openstack-cell1" Nov 26 17:52:25 crc kubenswrapper[5010]: E1126 17:52:25.812314 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="extract-content" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.812322 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="extract-content" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.812571 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="803b0121-2a6a-4ee8-b835-397db3b6bd43" containerName="nova-cell1-openstack-openstack-cell1" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.812590 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="10909ba7-328e-470a-afa4-c8b771726303" containerName="registry-server" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.813784 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.818026 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.819481 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.819635 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.819800 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.819906 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.824997 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-5t8x4"] Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944494 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944545 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944578 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7szk\" (UniqueName: \"kubernetes.io/projected/8f5656c3-ac2f-4666-89a8-70a09fee6e15-kube-api-access-t7szk\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944617 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-inventory\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944662 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944686 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:25 crc kubenswrapper[5010]: I1126 17:52:25.944786 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ssh-key\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048005 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048108 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048196 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7szk\" (UniqueName: \"kubernetes.io/projected/8f5656c3-ac2f-4666-89a8-70a09fee6e15-kube-api-access-t7szk\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048288 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-inventory\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048426 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048458 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.048675 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ssh-key\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.053123 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-inventory\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.053750 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-telemetry-combined-ca-bundle\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.054474 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-2\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.056137 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-0\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.056443 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-1\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.058050 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ssh-key\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.065354 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7szk\" (UniqueName: \"kubernetes.io/projected/8f5656c3-ac2f-4666-89a8-70a09fee6e15-kube-api-access-t7szk\") pod \"telemetry-openstack-openstack-cell1-5t8x4\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.164084 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:52:26 crc kubenswrapper[5010]: I1126 17:52:26.716216 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-openstack-openstack-cell1-5t8x4"] Nov 26 17:52:27 crc kubenswrapper[5010]: I1126 17:52:27.137265 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" event={"ID":"8f5656c3-ac2f-4666-89a8-70a09fee6e15","Type":"ContainerStarted","Data":"cfe15f7fcb36e0b41801231930318adbe0e275a4fd6280715e0a0cc8ecfc3bc6"} Nov 26 17:52:28 crc kubenswrapper[5010]: I1126 17:52:28.149980 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" event={"ID":"8f5656c3-ac2f-4666-89a8-70a09fee6e15","Type":"ContainerStarted","Data":"5d87a33164e2cd233fb53566288ef5171e8a47c7118855b13c57a401cd42df4a"} Nov 26 17:52:56 crc kubenswrapper[5010]: I1126 17:52:56.992855 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" podStartSLOduration=31.34821274 podStartE2EDuration="31.992831734s" podCreationTimestamp="2025-11-26 17:52:25 +0000 UTC" firstStartedPulling="2025-11-26 17:52:26.730067585 +0000 UTC m=+8767.520784743" lastFinishedPulling="2025-11-26 17:52:27.374686589 +0000 UTC m=+8768.165403737" observedRunningTime="2025-11-26 17:52:28.170311604 +0000 UTC m=+8768.961028752" watchObservedRunningTime="2025-11-26 17:52:56.992831734 +0000 UTC m=+8797.783548882" Nov 26 17:52:56 crc kubenswrapper[5010]: I1126 17:52:56.996650 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2qcdk"] Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.001550 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.010185 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qcdk"] Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.112941 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-catalog-content\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.113173 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dncb8\" (UniqueName: \"kubernetes.io/projected/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-kube-api-access-dncb8\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.113237 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-utilities\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.215542 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-catalog-content\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.215698 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dncb8\" (UniqueName: \"kubernetes.io/projected/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-kube-api-access-dncb8\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.215759 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-utilities\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.216477 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-utilities\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.216504 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-catalog-content\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.241862 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-dncb8\" (UniqueName: \"kubernetes.io/projected/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-kube-api-access-dncb8\") pod \"redhat-marketplace-2qcdk\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.325324 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:52:57 crc kubenswrapper[5010]: I1126 17:52:57.851457 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qcdk"] Nov 26 17:52:57 crc kubenswrapper[5010]: W1126 17:52:57.860740 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96ddf0fd_f32b_46ea_ab06_480dcfce2e6f.slice/crio-fbcd73fbd4c3c27b1ec6cf80f0ea45bf11a514850cb47a0dd605563d91c9c776 WatchSource:0}: Error finding container fbcd73fbd4c3c27b1ec6cf80f0ea45bf11a514850cb47a0dd605563d91c9c776: Status 404 returned error can't find the container with id fbcd73fbd4c3c27b1ec6cf80f0ea45bf11a514850cb47a0dd605563d91c9c776 Nov 26 17:52:58 crc kubenswrapper[5010]: I1126 17:52:58.526012 5010 generic.go:334] "Generic (PLEG): container finished" podID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerID="cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830" exitCode=0 Nov 26 17:52:58 crc kubenswrapper[5010]: I1126 17:52:58.526423 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerDied","Data":"cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830"} Nov 26 17:52:58 crc kubenswrapper[5010]: I1126 17:52:58.526458 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerStarted","Data":"fbcd73fbd4c3c27b1ec6cf80f0ea45bf11a514850cb47a0dd605563d91c9c776"} Nov 26 17:52:59 crc kubenswrapper[5010]: I1126 17:52:59.537856 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerStarted","Data":"51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773"} Nov 26 17:53:00 crc kubenswrapper[5010]: I1126 17:53:00.549129 5010 generic.go:334] "Generic (PLEG): container finished" podID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerID="51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773" exitCode=0 Nov 26 17:53:00 crc kubenswrapper[5010]: I1126 17:53:00.549178 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerDied","Data":"51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773"} Nov 26 17:53:01 crc kubenswrapper[5010]: I1126 17:53:01.560768 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerStarted","Data":"079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45"} Nov 26 17:53:01 crc kubenswrapper[5010]: I1126 17:53:01.586887 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2qcdk" podStartSLOduration=2.882946686 
podStartE2EDuration="5.586866156s" podCreationTimestamp="2025-11-26 17:52:56 +0000 UTC" firstStartedPulling="2025-11-26 17:52:58.529309266 +0000 UTC m=+8799.320026414" lastFinishedPulling="2025-11-26 17:53:01.233228736 +0000 UTC m=+8802.023945884" observedRunningTime="2025-11-26 17:53:01.580374065 +0000 UTC m=+8802.371091223" watchObservedRunningTime="2025-11-26 17:53:01.586866156 +0000 UTC m=+8802.377583304" Nov 26 17:53:07 crc kubenswrapper[5010]: I1126 17:53:07.326359 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:53:07 crc kubenswrapper[5010]: I1126 17:53:07.327000 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:53:07 crc kubenswrapper[5010]: I1126 17:53:07.378479 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:53:07 crc kubenswrapper[5010]: I1126 17:53:07.716802 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:53:07 crc kubenswrapper[5010]: I1126 17:53:07.770953 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qcdk"] Nov 26 17:53:09 crc kubenswrapper[5010]: I1126 17:53:09.664650 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2qcdk" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="registry-server" containerID="cri-o://079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45" gracePeriod=2 Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.238267 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.261004 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-utilities\") pod \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.261166 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-catalog-content\") pod \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.261203 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dncb8\" (UniqueName: \"kubernetes.io/projected/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-kube-api-access-dncb8\") pod \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\" (UID: \"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f\") " Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.262053 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-utilities" (OuterVolumeSpecName: "utilities") pod "96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" (UID: "96ddf0fd-f32b-46ea-ab06-480dcfce2e6f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.268880 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-kube-api-access-dncb8" (OuterVolumeSpecName: "kube-api-access-dncb8") pod "96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" (UID: "96ddf0fd-f32b-46ea-ab06-480dcfce2e6f"). InnerVolumeSpecName "kube-api-access-dncb8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.282550 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" (UID: "96ddf0fd-f32b-46ea-ab06-480dcfce2e6f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.362680 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.362737 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.362753 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dncb8\" (UniqueName: \"kubernetes.io/projected/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f-kube-api-access-dncb8\") on node \"crc\" DevicePath \"\"" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.677946 5010 generic.go:334] "Generic (PLEG): container finished" podID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerID="079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45" exitCode=0 Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.678012 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2qcdk" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.678039 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerDied","Data":"079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45"} Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.678790 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2qcdk" event={"ID":"96ddf0fd-f32b-46ea-ab06-480dcfce2e6f","Type":"ContainerDied","Data":"fbcd73fbd4c3c27b1ec6cf80f0ea45bf11a514850cb47a0dd605563d91c9c776"} Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.678823 5010 scope.go:117] "RemoveContainer" containerID="079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.713498 5010 scope.go:117] "RemoveContainer" containerID="51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.724227 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qcdk"] Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.736169 5010 scope.go:117] "RemoveContainer" containerID="cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.736835 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2qcdk"] Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.790207 5010 scope.go:117] "RemoveContainer" containerID="079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45" Nov 26 17:53:10 crc kubenswrapper[5010]: E1126 17:53:10.790884 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45\": container with ID starting with 079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45 not found: ID does not exist" containerID="079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.790944 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45"} err="failed to get container status \"079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45\": rpc error: code = NotFound desc = could not find container \"079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45\": container with ID starting with 079bf7b4e0df0ab817aec435937ae77e7cbc994f83c4fc20cf2b8e1ee4320d45 not found: ID does not exist" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.791027 5010 scope.go:117] "RemoveContainer" containerID="51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773" Nov 26 17:53:10 crc kubenswrapper[5010]: E1126 17:53:10.791511 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773\": container with ID starting with 51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773 not found: ID does not exist" containerID="51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.791558 5010 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773"} err="failed to get container status \"51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773\": rpc error: code = NotFound desc = could not find container \"51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773\": container with ID starting with 51e51fc79942b42f449e7b03f62f44515aebf2c0ba26ea5480d5bd5639d32773 not found: ID does not exist" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.791590 5010 scope.go:117] "RemoveContainer" containerID="cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830" Nov 26 17:53:10 crc kubenswrapper[5010]: E1126 17:53:10.792095 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830\": container with ID starting with cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830 not found: ID does not exist" containerID="cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830" Nov 26 17:53:10 crc kubenswrapper[5010]: I1126 17:53:10.792126 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830"} err="failed to get container status \"cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830\": rpc error: code = NotFound desc = could not find container \"cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830\": container with ID starting with cc3944a665336d51c868024f7c4f1d9f0cef72ae8bc24df3ea8a6e9198d3d830 not found: ID does not exist" Nov 26 17:53:11 crc kubenswrapper[5010]: I1126 17:53:11.912168 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" path="/var/lib/kubelet/pods/96ddf0fd-f32b-46ea-ab06-480dcfce2e6f/volumes" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.963055 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zq576"] Nov 26 17:54:35 crc kubenswrapper[5010]: E1126 17:54:35.964270 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="extract-utilities" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.964288 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="extract-utilities" Nov 26 17:54:35 crc kubenswrapper[5010]: E1126 17:54:35.964312 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="extract-content" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.964319 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="extract-content" Nov 26 17:54:35 crc kubenswrapper[5010]: E1126 17:54:35.964360 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="registry-server" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.964369 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" containerName="registry-server" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.964611 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="96ddf0fd-f32b-46ea-ab06-480dcfce2e6f" 
containerName="registry-server" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.966664 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:35 crc kubenswrapper[5010]: I1126 17:54:35.978965 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zq576"] Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.030196 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96aa6836-3869-414a-8c82-73debe80e38a-catalog-content\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.030568 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsb2k\" (UniqueName: \"kubernetes.io/projected/96aa6836-3869-414a-8c82-73debe80e38a-kube-api-access-wsb2k\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.030754 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96aa6836-3869-414a-8c82-73debe80e38a-utilities\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.133011 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsb2k\" (UniqueName: \"kubernetes.io/projected/96aa6836-3869-414a-8c82-73debe80e38a-kube-api-access-wsb2k\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.133270 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96aa6836-3869-414a-8c82-73debe80e38a-utilities\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.133491 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96aa6836-3869-414a-8c82-73debe80e38a-catalog-content\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.133797 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/96aa6836-3869-414a-8c82-73debe80e38a-utilities\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.133888 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/96aa6836-3869-414a-8c82-73debe80e38a-catalog-content\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 
26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.156379 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsb2k\" (UniqueName: \"kubernetes.io/projected/96aa6836-3869-414a-8c82-73debe80e38a-kube-api-access-wsb2k\") pod \"redhat-operators-zq576\" (UID: \"96aa6836-3869-414a-8c82-73debe80e38a\") " pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.351425 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.839975 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zq576"] Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.959906 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.963189 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:36 crc kubenswrapper[5010]: I1126 17:54:36.981157 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.055576 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdzk4\" (UniqueName: \"kubernetes.io/projected/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-kube-api-access-jdzk4\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.056087 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-utilities\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.056246 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-catalog-content\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.158683 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-utilities\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.158797 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-catalog-content\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.158986 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdzk4\" (UniqueName: 
\"kubernetes.io/projected/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-kube-api-access-jdzk4\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.159224 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-utilities\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.159447 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-catalog-content\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.220616 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdzk4\" (UniqueName: \"kubernetes.io/projected/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-kube-api-access-jdzk4\") pod \"certified-operators-c5tp8\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.303099 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.786289 5010 generic.go:334] "Generic (PLEG): container finished" podID="96aa6836-3869-414a-8c82-73debe80e38a" containerID="78318750f94c22cd4df99ee81925227437256ba6deb282dc8807d2f2007741d4" exitCode=0 Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.786579 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zq576" event={"ID":"96aa6836-3869-414a-8c82-73debe80e38a","Type":"ContainerDied","Data":"78318750f94c22cd4df99ee81925227437256ba6deb282dc8807d2f2007741d4"} Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.786606 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zq576" event={"ID":"96aa6836-3869-414a-8c82-73debe80e38a","Type":"ContainerStarted","Data":"f9d9b5f6183b26b3b398297fb492e54ef8d1b83079ffd67696435191f3b4c579"} Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.790042 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 17:54:37 crc kubenswrapper[5010]: I1126 17:54:37.878630 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 17:54:38 crc kubenswrapper[5010]: I1126 17:54:38.804573 5010 generic.go:334] "Generic (PLEG): container finished" podID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerID="f87fea71da75ffcabe23d2b86bb2e2a077393fc3b17b9e48e9795066eb3dcbab" exitCode=0 Nov 26 17:54:38 crc kubenswrapper[5010]: I1126 17:54:38.804661 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5tp8" event={"ID":"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff","Type":"ContainerDied","Data":"f87fea71da75ffcabe23d2b86bb2e2a077393fc3b17b9e48e9795066eb3dcbab"} Nov 26 17:54:38 crc kubenswrapper[5010]: I1126 17:54:38.804925 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-c5tp8" event={"ID":"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff","Type":"ContainerStarted","Data":"e97c4bea1fa10a2b8ff945f8d29bad8ad30df27667e054fef19636dd1d2b668a"} Nov 26 17:54:41 crc kubenswrapper[5010]: I1126 17:54:41.423127 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:54:41 crc kubenswrapper[5010]: I1126 17:54:41.423643 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:54:48 crc kubenswrapper[5010]: I1126 17:54:48.908090 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zq576" event={"ID":"96aa6836-3869-414a-8c82-73debe80e38a","Type":"ContainerStarted","Data":"bc1bb56174851266b7a6eb36c7c80133a29efac7aff89e48c0feae1f9616d87c"} Nov 26 17:54:48 crc kubenswrapper[5010]: I1126 17:54:48.912162 5010 generic.go:334] "Generic (PLEG): container finished" podID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerID="bfd2f125e4bdbd183239e3310fb05999eef199d051a32d967dba998a113c90fc" exitCode=0 Nov 26 17:54:48 crc kubenswrapper[5010]: I1126 17:54:48.912210 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5tp8" event={"ID":"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff","Type":"ContainerDied","Data":"bfd2f125e4bdbd183239e3310fb05999eef199d051a32d967dba998a113c90fc"} Nov 26 17:54:50 crc kubenswrapper[5010]: I1126 17:54:50.938892 5010 generic.go:334] "Generic (PLEG): container finished" podID="96aa6836-3869-414a-8c82-73debe80e38a" containerID="bc1bb56174851266b7a6eb36c7c80133a29efac7aff89e48c0feae1f9616d87c" exitCode=0 Nov 26 17:54:50 crc kubenswrapper[5010]: I1126 17:54:50.938983 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zq576" event={"ID":"96aa6836-3869-414a-8c82-73debe80e38a","Type":"ContainerDied","Data":"bc1bb56174851266b7a6eb36c7c80133a29efac7aff89e48c0feae1f9616d87c"} Nov 26 17:54:50 crc kubenswrapper[5010]: I1126 17:54:50.943793 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5tp8" event={"ID":"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff","Type":"ContainerStarted","Data":"288aae808187f79aa64c666ecf468e66f878c5a7eafd47c2c0e8d88be5c363c5"} Nov 26 17:54:50 crc kubenswrapper[5010]: I1126 17:54:50.992765 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-c5tp8" podStartSLOduration=4.045001339 podStartE2EDuration="14.992744623s" podCreationTimestamp="2025-11-26 17:54:36 +0000 UTC" firstStartedPulling="2025-11-26 17:54:38.806802602 +0000 UTC m=+8899.597519770" lastFinishedPulling="2025-11-26 17:54:49.754545896 +0000 UTC m=+8910.545263054" observedRunningTime="2025-11-26 17:54:50.988505648 +0000 UTC m=+8911.779222806" watchObservedRunningTime="2025-11-26 17:54:50.992744623 +0000 UTC m=+8911.783461781" Nov 26 17:54:51 crc kubenswrapper[5010]: I1126 17:54:51.955092 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zq576" 
event={"ID":"96aa6836-3869-414a-8c82-73debe80e38a","Type":"ContainerStarted","Data":"85bf27ad9e508f0eb83b29f713ea86f18ac10aefa3dbf63974d3a11808e511d7"} Nov 26 17:54:51 crc kubenswrapper[5010]: I1126 17:54:51.987672 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zq576" podStartSLOduration=3.308853595 podStartE2EDuration="16.987649833s" podCreationTimestamp="2025-11-26 17:54:35 +0000 UTC" firstStartedPulling="2025-11-26 17:54:37.789816253 +0000 UTC m=+8898.580533401" lastFinishedPulling="2025-11-26 17:54:51.468612491 +0000 UTC m=+8912.259329639" observedRunningTime="2025-11-26 17:54:51.974029994 +0000 UTC m=+8912.764747152" watchObservedRunningTime="2025-11-26 17:54:51.987649833 +0000 UTC m=+8912.778366991" Nov 26 17:54:56 crc kubenswrapper[5010]: I1126 17:54:56.351852 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:56 crc kubenswrapper[5010]: I1126 17:54:56.352751 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:54:57 crc kubenswrapper[5010]: I1126 17:54:57.304117 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:57 crc kubenswrapper[5010]: I1126 17:54:57.304621 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:57 crc kubenswrapper[5010]: I1126 17:54:57.362144 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.062273 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zq576" podUID="96aa6836-3869-414a-8c82-73debe80e38a" containerName="registry-server" probeResult="failure" output=< Nov 26 17:54:58 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 17:54:58 crc kubenswrapper[5010]: > Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.091361 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.205258 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.247331 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.247560 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c8fm9" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="registry-server" containerID="cri-o://1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46" gracePeriod=2 Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.792102 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.872438 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-59db6\" (UniqueName: \"kubernetes.io/projected/a53dffa9-5f15-4495-a017-c496e0218280-kube-api-access-59db6\") pod \"a53dffa9-5f15-4495-a017-c496e0218280\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.872601 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-catalog-content\") pod \"a53dffa9-5f15-4495-a017-c496e0218280\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.872648 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-utilities\") pod \"a53dffa9-5f15-4495-a017-c496e0218280\" (UID: \"a53dffa9-5f15-4495-a017-c496e0218280\") " Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.873355 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-utilities" (OuterVolumeSpecName: "utilities") pod "a53dffa9-5f15-4495-a017-c496e0218280" (UID: "a53dffa9-5f15-4495-a017-c496e0218280"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.878184 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a53dffa9-5f15-4495-a017-c496e0218280-kube-api-access-59db6" (OuterVolumeSpecName: "kube-api-access-59db6") pod "a53dffa9-5f15-4495-a017-c496e0218280" (UID: "a53dffa9-5f15-4495-a017-c496e0218280"). InnerVolumeSpecName "kube-api-access-59db6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.917517 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a53dffa9-5f15-4495-a017-c496e0218280" (UID: "a53dffa9-5f15-4495-a017-c496e0218280"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.977319 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-59db6\" (UniqueName: \"kubernetes.io/projected/a53dffa9-5f15-4495-a017-c496e0218280-kube-api-access-59db6\") on node \"crc\" DevicePath \"\"" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.977361 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:54:58 crc kubenswrapper[5010]: I1126 17:54:58.977375 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a53dffa9-5f15-4495-a017-c496e0218280-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.034755 5010 generic.go:334] "Generic (PLEG): container finished" podID="a53dffa9-5f15-4495-a017-c496e0218280" containerID="1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46" exitCode=0 Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.034840 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c8fm9" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.034865 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerDied","Data":"1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46"} Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.034953 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c8fm9" event={"ID":"a53dffa9-5f15-4495-a017-c496e0218280","Type":"ContainerDied","Data":"ec8861d86f9ad49c771244ef1c09fa3d1f4e1505742e92e4f1225f780132e27f"} Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.034975 5010 scope.go:117] "RemoveContainer" containerID="1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.069776 5010 scope.go:117] "RemoveContainer" containerID="bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.077032 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.086363 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c8fm9"] Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.102068 5010 scope.go:117] "RemoveContainer" containerID="034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.167259 5010 scope.go:117] "RemoveContainer" containerID="1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46" Nov 26 17:54:59 crc kubenswrapper[5010]: E1126 17:54:59.167675 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46\": container with ID starting with 1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46 not found: ID does not exist" containerID="1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.167724 
5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46"} err="failed to get container status \"1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46\": rpc error: code = NotFound desc = could not find container \"1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46\": container with ID starting with 1612fa1c352c3d48313c2221a0896b44d0043f0a0a7d0f6f67313646e6817d46 not found: ID does not exist" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.167744 5010 scope.go:117] "RemoveContainer" containerID="bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58" Nov 26 17:54:59 crc kubenswrapper[5010]: E1126 17:54:59.168109 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58\": container with ID starting with bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58 not found: ID does not exist" containerID="bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.168131 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58"} err="failed to get container status \"bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58\": rpc error: code = NotFound desc = could not find container \"bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58\": container with ID starting with bd8c0847546be4fd7b707de317addc57e87e350014503109244985e77d33af58 not found: ID does not exist" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.168144 5010 scope.go:117] "RemoveContainer" containerID="034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355" Nov 26 17:54:59 crc kubenswrapper[5010]: E1126 17:54:59.168395 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355\": container with ID starting with 034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355 not found: ID does not exist" containerID="034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.168416 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355"} err="failed to get container status \"034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355\": rpc error: code = NotFound desc = could not find container \"034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355\": container with ID starting with 034340497e400cd5a6800de022bb281cb62753aa9e06dcd092a18e17e6a75355 not found: ID does not exist" Nov 26 17:54:59 crc kubenswrapper[5010]: I1126 17:54:59.905799 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a53dffa9-5f15-4495-a017-c496e0218280" path="/var/lib/kubelet/pods/a53dffa9-5f15-4495-a017-c496e0218280/volumes" Nov 26 17:55:06 crc kubenswrapper[5010]: I1126 17:55:06.403250 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:55:06 crc kubenswrapper[5010]: I1126 17:55:06.459434 5010 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zq576" Nov 26 17:55:06 crc kubenswrapper[5010]: I1126 17:55:06.999392 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zq576"] Nov 26 17:55:07 crc kubenswrapper[5010]: I1126 17:55:07.368253 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-th8t9"] Nov 26 17:55:07 crc kubenswrapper[5010]: I1126 17:55:07.368527 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-th8t9" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="registry-server" containerID="cri-o://843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325" gracePeriod=2 Nov 26 17:55:07 crc kubenswrapper[5010]: I1126 17:55:07.912200 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.028260 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-utilities\") pod \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.028372 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr6pd\" (UniqueName: \"kubernetes.io/projected/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-kube-api-access-pr6pd\") pod \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.028421 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-catalog-content\") pod \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\" (UID: \"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec\") " Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.029998 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-utilities" (OuterVolumeSpecName: "utilities") pod "8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" (UID: "8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.048095 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-kube-api-access-pr6pd" (OuterVolumeSpecName: "kube-api-access-pr6pd") pod "8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" (UID: "8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec"). InnerVolumeSpecName "kube-api-access-pr6pd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.130559 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.130592 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr6pd\" (UniqueName: \"kubernetes.io/projected/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-kube-api-access-pr6pd\") on node \"crc\" DevicePath \"\"" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.130926 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" (UID: "8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.145064 5010 generic.go:334] "Generic (PLEG): container finished" podID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerID="843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325" exitCode=0 Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.145510 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-th8t9" event={"ID":"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec","Type":"ContainerDied","Data":"843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325"} Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.145538 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-th8t9" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.145572 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-th8t9" event={"ID":"8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec","Type":"ContainerDied","Data":"061a907c809eb8abed368ca02ac82f51d0bb13e650a69930bbdccfdc8d85a617"} Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.145595 5010 scope.go:117] "RemoveContainer" containerID="843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.167011 5010 scope.go:117] "RemoveContainer" containerID="13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.184136 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-th8t9"] Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.192069 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-th8t9"] Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.195200 5010 scope.go:117] "RemoveContainer" containerID="1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.233481 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.242151 5010 scope.go:117] "RemoveContainer" containerID="843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325" Nov 26 17:55:08 crc kubenswrapper[5010]: E1126 17:55:08.242502 5010 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325\": container with ID starting with 843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325 not found: ID does not exist" containerID="843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.242548 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325"} err="failed to get container status \"843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325\": rpc error: code = NotFound desc = could not find container \"843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325\": container with ID starting with 843d17aa691b009cef1f89e34bca40fb88a82854513b662c34c4c2b2e5f45325 not found: ID does not exist" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.242580 5010 scope.go:117] "RemoveContainer" containerID="13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff" Nov 26 17:55:08 crc kubenswrapper[5010]: E1126 17:55:08.242860 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff\": container with ID starting with 13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff not found: ID does not exist" containerID="13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.242898 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff"} err="failed to get container status \"13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff\": rpc error: code = NotFound desc = could not find container \"13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff\": container with ID starting with 13845a24b65b8f82b2d30671ddefecfbd80e26b82c7c2f2b9ab248e67cb50eff not found: ID does not exist" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.242926 5010 scope.go:117] "RemoveContainer" containerID="1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee" Nov 26 17:55:08 crc kubenswrapper[5010]: E1126 17:55:08.243861 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee\": container with ID starting with 1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee not found: ID does not exist" containerID="1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee" Nov 26 17:55:08 crc kubenswrapper[5010]: I1126 17:55:08.243899 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee"} err="failed to get container status \"1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee\": rpc error: code = NotFound desc = could not find container \"1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee\": container with ID starting with 1fc4dfbddd1cebf4c822f194268737d1f69f496f3ec040ee8bf5809753dd9fee not found: ID does not exist" Nov 26 17:55:09 crc kubenswrapper[5010]: I1126 17:55:09.907942 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" path="/var/lib/kubelet/pods/8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec/volumes" Nov 26 17:55:11 crc kubenswrapper[5010]: I1126 17:55:11.423262 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:55:11 crc kubenswrapper[5010]: I1126 17:55:11.423357 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.422760 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.424984 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.425103 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.426620 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"516fdbcbc586d8c3f01cbfc3a0f286b0cd02e5668f8809602741ea746e0e3f69"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.426805 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://516fdbcbc586d8c3f01cbfc3a0f286b0cd02e5668f8809602741ea746e0e3f69" gracePeriod=600 Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.729807 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="516fdbcbc586d8c3f01cbfc3a0f286b0cd02e5668f8809602741ea746e0e3f69" exitCode=0 Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.730103 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"516fdbcbc586d8c3f01cbfc3a0f286b0cd02e5668f8809602741ea746e0e3f69"} Nov 26 17:55:41 crc kubenswrapper[5010]: I1126 17:55:41.730135 5010 scope.go:117] "RemoveContainer" containerID="1c2fe6e12b94be405ddfce4dd2c67579f87d3a7df9dd2628074dc93ced4a252f" Nov 26 17:55:42 crc kubenswrapper[5010]: I1126 17:55:42.747078 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b"} Nov 26 17:56:35 crc kubenswrapper[5010]: I1126 17:56:35.355188 5010 generic.go:334] "Generic (PLEG): container finished" podID="8f5656c3-ac2f-4666-89a8-70a09fee6e15" containerID="5d87a33164e2cd233fb53566288ef5171e8a47c7118855b13c57a401cd42df4a" exitCode=0 Nov 26 17:56:35 crc kubenswrapper[5010]: I1126 17:56:35.355261 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" event={"ID":"8f5656c3-ac2f-4666-89a8-70a09fee6e15","Type":"ContainerDied","Data":"5d87a33164e2cd233fb53566288ef5171e8a47c7118855b13c57a401cd42df4a"} Nov 26 17:56:36 crc kubenswrapper[5010]: I1126 17:56:36.937125 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.125685 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7szk\" (UniqueName: \"kubernetes.io/projected/8f5656c3-ac2f-4666-89a8-70a09fee6e15-kube-api-access-t7szk\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.125881 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-0\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.126105 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-inventory\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.126182 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-1\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.126224 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-telemetry-combined-ca-bundle\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.126266 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-2\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.126318 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ssh-key\") pod \"8f5656c3-ac2f-4666-89a8-70a09fee6e15\" (UID: 
\"8f5656c3-ac2f-4666-89a8-70a09fee6e15\") " Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.132348 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f5656c3-ac2f-4666-89a8-70a09fee6e15-kube-api-access-t7szk" (OuterVolumeSpecName: "kube-api-access-t7szk") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "kube-api-access-t7szk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.133606 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.164136 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-inventory" (OuterVolumeSpecName: "inventory") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.166244 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.173215 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.178842 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.183685 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "8f5656c3-ac2f-4666-89a8-70a09fee6e15" (UID: "8f5656c3-ac2f-4666-89a8-70a09fee6e15"). InnerVolumeSpecName "ceilometer-compute-config-data-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229605 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7szk\" (UniqueName: \"kubernetes.io/projected/8f5656c3-ac2f-4666-89a8-70a09fee6e15-kube-api-access-t7szk\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229657 5010 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229672 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229687 5010 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229698 5010 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229711 5010 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.229732 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/8f5656c3-ac2f-4666-89a8-70a09fee6e15-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.385132 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" event={"ID":"8f5656c3-ac2f-4666-89a8-70a09fee6e15","Type":"ContainerDied","Data":"cfe15f7fcb36e0b41801231930318adbe0e275a4fd6280715e0a0cc8ecfc3bc6"} Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.385193 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cfe15f7fcb36e0b41801231930318adbe0e275a4fd6280715e0a0cc8ecfc3bc6" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.385223 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-openstack-openstack-cell1-5t8x4" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.512569 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-qcxvr"] Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.513982 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="extract-content" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514012 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="extract-content" Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.514052 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="extract-content" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514062 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="extract-content" Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.514079 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="registry-server" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514089 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="registry-server" Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.514117 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="extract-utilities" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514126 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="extract-utilities" Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.514145 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f5656c3-ac2f-4666-89a8-70a09fee6e15" containerName="telemetry-openstack-openstack-cell1" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514154 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f5656c3-ac2f-4666-89a8-70a09fee6e15" containerName="telemetry-openstack-openstack-cell1" Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.514189 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="extract-utilities" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514201 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="extract-utilities" Nov 26 17:56:37 crc kubenswrapper[5010]: E1126 17:56:37.514215 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="registry-server" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514224 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="registry-server" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514567 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a6e2eaf-7fdd-4dd0-96ef-0f4b026e0aec" containerName="registry-server" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.514623 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f5656c3-ac2f-4666-89a8-70a09fee6e15" containerName="telemetry-openstack-openstack-cell1" Nov 26 17:56:37 crc kubenswrapper[5010]: 
I1126 17:56:37.514642 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a53dffa9-5f15-4495-a017-c496e0218280" containerName="registry-server" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.515984 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.522265 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.522520 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.522545 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.535121 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.535355 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-sriov-agent-neutron-config" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.537994 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.538076 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.538205 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.538335 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.538394 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dbzlm\" (UniqueName: \"kubernetes.io/projected/0f4095f7-6cba-45da-b62e-8e39587d45b0-kube-api-access-dbzlm\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 
17:56:37.538985 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-qcxvr"] Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.640977 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.641042 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dbzlm\" (UniqueName: \"kubernetes.io/projected/0f4095f7-6cba-45da-b62e-8e39587d45b0-kube-api-access-dbzlm\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.641123 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.641172 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.641271 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.647333 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-inventory\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.647403 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-agent-neutron-config-0\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.648377 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-combined-ca-bundle\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " 
pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.648607 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-ssh-key\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.662833 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dbzlm\" (UniqueName: \"kubernetes.io/projected/0f4095f7-6cba-45da-b62e-8e39587d45b0-kube-api-access-dbzlm\") pod \"neutron-sriov-openstack-openstack-cell1-qcxvr\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:37 crc kubenswrapper[5010]: I1126 17:56:37.837746 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 17:56:38 crc kubenswrapper[5010]: I1126 17:56:38.444921 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-sriov-openstack-openstack-cell1-qcxvr"] Nov 26 17:56:39 crc kubenswrapper[5010]: I1126 17:56:39.413743 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" event={"ID":"0f4095f7-6cba-45da-b62e-8e39587d45b0","Type":"ContainerStarted","Data":"ffd5eb48dfceddf66c0934dcaadb092c45517660120051b3a6881bdd5b4269a8"} Nov 26 17:56:39 crc kubenswrapper[5010]: I1126 17:56:39.414503 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" event={"ID":"0f4095f7-6cba-45da-b62e-8e39587d45b0","Type":"ContainerStarted","Data":"f87d38e0e481f0aa30619766cb40161d518762ed69fcc2c87bd1a92bd15f2d03"} Nov 26 17:56:39 crc kubenswrapper[5010]: I1126 17:56:39.435719 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" podStartSLOduration=1.855941224 podStartE2EDuration="2.435691094s" podCreationTimestamp="2025-11-26 17:56:37 +0000 UTC" firstStartedPulling="2025-11-26 17:56:38.447480251 +0000 UTC m=+9019.238197449" lastFinishedPulling="2025-11-26 17:56:39.027230171 +0000 UTC m=+9019.817947319" observedRunningTime="2025-11-26 17:56:39.434819672 +0000 UTC m=+9020.225536810" watchObservedRunningTime="2025-11-26 17:56:39.435691094 +0000 UTC m=+9020.226408242" Nov 26 17:56:55 crc kubenswrapper[5010]: I1126 17:56:55.706156 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="536595b1-5ba9-4588-8e64-32480adb79ea" containerName="galera" probeResult="failure" output="command timed out" Nov 26 17:57:41 crc kubenswrapper[5010]: I1126 17:57:41.422369 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:57:41 crc kubenswrapper[5010]: I1126 17:57:41.422955 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial 
tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:58:11 crc kubenswrapper[5010]: I1126 17:58:11.422906 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:58:11 crc kubenswrapper[5010]: I1126 17:58:11.423815 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:58:41 crc kubenswrapper[5010]: I1126 17:58:41.422822 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 17:58:41 crc kubenswrapper[5010]: I1126 17:58:41.423444 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 17:58:41 crc kubenswrapper[5010]: I1126 17:58:41.423501 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 17:58:41 crc kubenswrapper[5010]: I1126 17:58:41.424584 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 17:58:41 crc kubenswrapper[5010]: I1126 17:58:41.424657 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" gracePeriod=600 Nov 26 17:58:41 crc kubenswrapper[5010]: E1126 17:58:41.565673 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:58:42 crc kubenswrapper[5010]: I1126 17:58:42.223572 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" exitCode=0 Nov 26 17:58:42 crc kubenswrapper[5010]: I1126 17:58:42.223662 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b"} Nov 26 17:58:42 crc kubenswrapper[5010]: I1126 17:58:42.223737 5010 scope.go:117] "RemoveContainer" containerID="516fdbcbc586d8c3f01cbfc3a0f286b0cd02e5668f8809602741ea746e0e3f69" Nov 26 17:58:42 crc kubenswrapper[5010]: I1126 17:58:42.224890 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 17:58:42 crc kubenswrapper[5010]: E1126 17:58:42.225541 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:58:56 crc kubenswrapper[5010]: I1126 17:58:56.892002 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 17:58:56 crc kubenswrapper[5010]: E1126 17:58:56.893094 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:59:10 crc kubenswrapper[5010]: I1126 17:59:10.892232 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 17:59:10 crc kubenswrapper[5010]: E1126 17:59:10.893016 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:59:24 crc kubenswrapper[5010]: I1126 17:59:24.892062 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 17:59:24 crc kubenswrapper[5010]: E1126 17:59:24.893114 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:59:38 crc kubenswrapper[5010]: I1126 17:59:38.892915 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 17:59:38 crc kubenswrapper[5010]: E1126 17:59:38.894373 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 17:59:52 crc kubenswrapper[5010]: I1126 17:59:52.892205 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 17:59:52 crc kubenswrapper[5010]: E1126 17:59:52.893752 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.183552 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2"] Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.196740 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.201856 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.206547 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.220301 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2"] Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.314524 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91378621-a736-46f4-8dd4-d246b87cce88-secret-volume\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.314796 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91378621-a736-46f4-8dd4-d246b87cce88-config-volume\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.314841 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkr87\" (UniqueName: \"kubernetes.io/projected/91378621-a736-46f4-8dd4-d246b87cce88-kube-api-access-tkr87\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.416549 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91378621-a736-46f4-8dd4-d246b87cce88-config-volume\") pod \"collect-profiles-29403000-pk7p2\" (UID: 
\"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.416617 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkr87\" (UniqueName: \"kubernetes.io/projected/91378621-a736-46f4-8dd4-d246b87cce88-kube-api-access-tkr87\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.416773 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91378621-a736-46f4-8dd4-d246b87cce88-secret-volume\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.417599 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91378621-a736-46f4-8dd4-d246b87cce88-config-volume\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.427752 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91378621-a736-46f4-8dd4-d246b87cce88-secret-volume\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.444182 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkr87\" (UniqueName: \"kubernetes.io/projected/91378621-a736-46f4-8dd4-d246b87cce88-kube-api-access-tkr87\") pod \"collect-profiles-29403000-pk7p2\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:00 crc kubenswrapper[5010]: I1126 18:00:00.532000 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:01 crc kubenswrapper[5010]: W1126 18:00:01.017812 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod91378621_a736_46f4_8dd4_d246b87cce88.slice/crio-4369747043b33e8ca4543923c398307f861a05fe3fcc882caf1a5bb04a58b1a7 WatchSource:0}: Error finding container 4369747043b33e8ca4543923c398307f861a05fe3fcc882caf1a5bb04a58b1a7: Status 404 returned error can't find the container with id 4369747043b33e8ca4543923c398307f861a05fe3fcc882caf1a5bb04a58b1a7 Nov 26 18:00:01 crc kubenswrapper[5010]: I1126 18:00:01.019895 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2"] Nov 26 18:00:01 crc kubenswrapper[5010]: I1126 18:00:01.234228 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" event={"ID":"91378621-a736-46f4-8dd4-d246b87cce88","Type":"ContainerStarted","Data":"f27b7f0847ee74d6d8a80e9731dce69b7fec329bf2d255d23753d49c8fd5b78f"} Nov 26 18:00:01 crc kubenswrapper[5010]: I1126 18:00:01.234518 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" event={"ID":"91378621-a736-46f4-8dd4-d246b87cce88","Type":"ContainerStarted","Data":"4369747043b33e8ca4543923c398307f861a05fe3fcc882caf1a5bb04a58b1a7"} Nov 26 18:00:01 crc kubenswrapper[5010]: I1126 18:00:01.256547 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" podStartSLOduration=1.256523595 podStartE2EDuration="1.256523595s" podCreationTimestamp="2025-11-26 18:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:00:01.247775827 +0000 UTC m=+9222.038493005" watchObservedRunningTime="2025-11-26 18:00:01.256523595 +0000 UTC m=+9222.047240753" Nov 26 18:00:02 crc kubenswrapper[5010]: I1126 18:00:02.254027 5010 generic.go:334] "Generic (PLEG): container finished" podID="91378621-a736-46f4-8dd4-d246b87cce88" containerID="f27b7f0847ee74d6d8a80e9731dce69b7fec329bf2d255d23753d49c8fd5b78f" exitCode=0 Nov 26 18:00:02 crc kubenswrapper[5010]: I1126 18:00:02.254467 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" event={"ID":"91378621-a736-46f4-8dd4-d246b87cce88","Type":"ContainerDied","Data":"f27b7f0847ee74d6d8a80e9731dce69b7fec329bf2d255d23753d49c8fd5b78f"} Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.628509 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.808531 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkr87\" (UniqueName: \"kubernetes.io/projected/91378621-a736-46f4-8dd4-d246b87cce88-kube-api-access-tkr87\") pod \"91378621-a736-46f4-8dd4-d246b87cce88\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.809003 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91378621-a736-46f4-8dd4-d246b87cce88-config-volume\") pod \"91378621-a736-46f4-8dd4-d246b87cce88\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.809072 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91378621-a736-46f4-8dd4-d246b87cce88-secret-volume\") pod \"91378621-a736-46f4-8dd4-d246b87cce88\" (UID: \"91378621-a736-46f4-8dd4-d246b87cce88\") " Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.809678 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91378621-a736-46f4-8dd4-d246b87cce88-config-volume" (OuterVolumeSpecName: "config-volume") pod "91378621-a736-46f4-8dd4-d246b87cce88" (UID: "91378621-a736-46f4-8dd4-d246b87cce88"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.814173 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91378621-a736-46f4-8dd4-d246b87cce88-kube-api-access-tkr87" (OuterVolumeSpecName: "kube-api-access-tkr87") pod "91378621-a736-46f4-8dd4-d246b87cce88" (UID: "91378621-a736-46f4-8dd4-d246b87cce88"). InnerVolumeSpecName "kube-api-access-tkr87". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.814572 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91378621-a736-46f4-8dd4-d246b87cce88-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "91378621-a736-46f4-8dd4-d246b87cce88" (UID: "91378621-a736-46f4-8dd4-d246b87cce88"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.912088 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/91378621-a736-46f4-8dd4-d246b87cce88-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.912447 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/91378621-a736-46f4-8dd4-d246b87cce88-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 18:00:03 crc kubenswrapper[5010]: I1126 18:00:03.912560 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkr87\" (UniqueName: \"kubernetes.io/projected/91378621-a736-46f4-8dd4-d246b87cce88-kube-api-access-tkr87\") on node \"crc\" DevicePath \"\"" Nov 26 18:00:04 crc kubenswrapper[5010]: I1126 18:00:04.290309 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" event={"ID":"91378621-a736-46f4-8dd4-d246b87cce88","Type":"ContainerDied","Data":"4369747043b33e8ca4543923c398307f861a05fe3fcc882caf1a5bb04a58b1a7"} Nov 26 18:00:04 crc kubenswrapper[5010]: I1126 18:00:04.290362 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4369747043b33e8ca4543923c398307f861a05fe3fcc882caf1a5bb04a58b1a7" Nov 26 18:00:04 crc kubenswrapper[5010]: I1126 18:00:04.290412 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403000-pk7p2" Nov 26 18:00:04 crc kubenswrapper[5010]: I1126 18:00:04.364287 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc"] Nov 26 18:00:04 crc kubenswrapper[5010]: I1126 18:00:04.378599 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402955-h7kbc"] Nov 26 18:00:05 crc kubenswrapper[5010]: I1126 18:00:05.917956 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8744ecb1-0343-454f-89ba-f7e8e63d40f5" path="/var/lib/kubelet/pods/8744ecb1-0343-454f-89ba-f7e8e63d40f5/volumes" Nov 26 18:00:07 crc kubenswrapper[5010]: I1126 18:00:07.892556 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:00:07 crc kubenswrapper[5010]: E1126 18:00:07.893549 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:00:13 crc kubenswrapper[5010]: I1126 18:00:13.366639 5010 scope.go:117] "RemoveContainer" containerID="8c318b6a6a889f2bfdd15c17a5148fefa56ba35b2f9bca5380e0587c5fbd723f" Nov 26 18:00:19 crc kubenswrapper[5010]: I1126 18:00:19.892815 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:00:19 crc kubenswrapper[5010]: E1126 18:00:19.893701 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:00:30 crc kubenswrapper[5010]: I1126 18:00:30.892502 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:00:30 crc kubenswrapper[5010]: E1126 18:00:30.893314 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:00:43 crc kubenswrapper[5010]: I1126 18:00:43.892109 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:00:43 crc kubenswrapper[5010]: E1126 18:00:43.893471 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:00:54 crc kubenswrapper[5010]: I1126 18:00:54.891883 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:00:54 crc kubenswrapper[5010]: E1126 18:00:54.892854 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.189876 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29403001-9675b"] Nov 26 18:01:00 crc kubenswrapper[5010]: E1126 18:01:00.192390 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91378621-a736-46f4-8dd4-d246b87cce88" containerName="collect-profiles" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.192412 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="91378621-a736-46f4-8dd4-d246b87cce88" containerName="collect-profiles" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.192654 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="91378621-a736-46f4-8dd4-d246b87cce88" containerName="collect-profiles" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.193486 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.201735 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29403001-9675b"] Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.293194 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-fernet-keys\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.293380 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-df2km\" (UniqueName: \"kubernetes.io/projected/7944fe1e-8e94-4f90-b1de-984ae9b16948-kube-api-access-df2km\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.293521 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-config-data\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.293595 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-combined-ca-bundle\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.395222 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-config-data\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.395308 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-combined-ca-bundle\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.395347 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-fernet-keys\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.395470 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-df2km\" (UniqueName: \"kubernetes.io/projected/7944fe1e-8e94-4f90-b1de-984ae9b16948-kube-api-access-df2km\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.403106 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-fernet-keys\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.404018 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-combined-ca-bundle\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.405468 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-config-data\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.414685 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-df2km\" (UniqueName: \"kubernetes.io/projected/7944fe1e-8e94-4f90-b1de-984ae9b16948-kube-api-access-df2km\") pod \"keystone-cron-29403001-9675b\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:00 crc kubenswrapper[5010]: I1126 18:01:00.573195 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:01 crc kubenswrapper[5010]: I1126 18:01:01.196200 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29403001-9675b"] Nov 26 18:01:01 crc kubenswrapper[5010]: W1126 18:01:01.719668 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7944fe1e_8e94_4f90_b1de_984ae9b16948.slice/crio-414903f696df24985c25e38e10178b830aeda010a087ce9241002318a29dc556 WatchSource:0}: Error finding container 414903f696df24985c25e38e10178b830aeda010a087ce9241002318a29dc556: Status 404 returned error can't find the container with id 414903f696df24985c25e38e10178b830aeda010a087ce9241002318a29dc556 Nov 26 18:01:02 crc kubenswrapper[5010]: I1126 18:01:02.418264 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403001-9675b" event={"ID":"7944fe1e-8e94-4f90-b1de-984ae9b16948","Type":"ContainerStarted","Data":"7004b8dcb51b33bb3f40a1af8c1d5809d8b5a9761d19fc3a1e6cc9b951658c22"} Nov 26 18:01:02 crc kubenswrapper[5010]: I1126 18:01:02.419598 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403001-9675b" event={"ID":"7944fe1e-8e94-4f90-b1de-984ae9b16948","Type":"ContainerStarted","Data":"414903f696df24985c25e38e10178b830aeda010a087ce9241002318a29dc556"} Nov 26 18:01:02 crc kubenswrapper[5010]: I1126 18:01:02.440954 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29403001-9675b" podStartSLOduration=2.440929367 podStartE2EDuration="2.440929367s" podCreationTimestamp="2025-11-26 18:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:01:02.431663186 +0000 UTC m=+9283.222380344" watchObservedRunningTime="2025-11-26 18:01:02.440929367 +0000 UTC m=+9283.231646515" Nov 26 18:01:05 crc kubenswrapper[5010]: I1126 18:01:05.453678 5010 
generic.go:334] "Generic (PLEG): container finished" podID="7944fe1e-8e94-4f90-b1de-984ae9b16948" containerID="7004b8dcb51b33bb3f40a1af8c1d5809d8b5a9761d19fc3a1e6cc9b951658c22" exitCode=0 Nov 26 18:01:05 crc kubenswrapper[5010]: I1126 18:01:05.453748 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403001-9675b" event={"ID":"7944fe1e-8e94-4f90-b1de-984ae9b16948","Type":"ContainerDied","Data":"7004b8dcb51b33bb3f40a1af8c1d5809d8b5a9761d19fc3a1e6cc9b951658c22"} Nov 26 18:01:05 crc kubenswrapper[5010]: I1126 18:01:05.892445 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:01:05 crc kubenswrapper[5010]: E1126 18:01:05.893004 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.834072 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.889477 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-config-data\") pod \"7944fe1e-8e94-4f90-b1de-984ae9b16948\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.889519 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-fernet-keys\") pod \"7944fe1e-8e94-4f90-b1de-984ae9b16948\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.889577 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-df2km\" (UniqueName: \"kubernetes.io/projected/7944fe1e-8e94-4f90-b1de-984ae9b16948-kube-api-access-df2km\") pod \"7944fe1e-8e94-4f90-b1de-984ae9b16948\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.889699 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-combined-ca-bundle\") pod \"7944fe1e-8e94-4f90-b1de-984ae9b16948\" (UID: \"7944fe1e-8e94-4f90-b1de-984ae9b16948\") " Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.898475 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "7944fe1e-8e94-4f90-b1de-984ae9b16948" (UID: "7944fe1e-8e94-4f90-b1de-984ae9b16948"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.905104 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7944fe1e-8e94-4f90-b1de-984ae9b16948-kube-api-access-df2km" (OuterVolumeSpecName: "kube-api-access-df2km") pod "7944fe1e-8e94-4f90-b1de-984ae9b16948" (UID: "7944fe1e-8e94-4f90-b1de-984ae9b16948"). InnerVolumeSpecName "kube-api-access-df2km". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.934439 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7944fe1e-8e94-4f90-b1de-984ae9b16948" (UID: "7944fe1e-8e94-4f90-b1de-984ae9b16948"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.950938 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-config-data" (OuterVolumeSpecName: "config-data") pod "7944fe1e-8e94-4f90-b1de-984ae9b16948" (UID: "7944fe1e-8e94-4f90-b1de-984ae9b16948"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.993443 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-df2km\" (UniqueName: \"kubernetes.io/projected/7944fe1e-8e94-4f90-b1de-984ae9b16948-kube-api-access-df2km\") on node \"crc\" DevicePath \"\"" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.993474 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.993486 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 18:01:06 crc kubenswrapper[5010]: I1126 18:01:06.993496 5010 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/7944fe1e-8e94-4f90-b1de-984ae9b16948-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 26 18:01:07 crc kubenswrapper[5010]: I1126 18:01:07.477808 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29403001-9675b" event={"ID":"7944fe1e-8e94-4f90-b1de-984ae9b16948","Type":"ContainerDied","Data":"414903f696df24985c25e38e10178b830aeda010a087ce9241002318a29dc556"} Nov 26 18:01:07 crc kubenswrapper[5010]: I1126 18:01:07.478095 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="414903f696df24985c25e38e10178b830aeda010a087ce9241002318a29dc556" Nov 26 18:01:07 crc kubenswrapper[5010]: I1126 18:01:07.477896 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29403001-9675b" Nov 26 18:01:19 crc kubenswrapper[5010]: I1126 18:01:19.932124 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:01:19 crc kubenswrapper[5010]: E1126 18:01:19.934254 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:01:33 crc kubenswrapper[5010]: I1126 18:01:33.891589 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:01:33 crc kubenswrapper[5010]: E1126 18:01:33.895189 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:01:45 crc kubenswrapper[5010]: I1126 18:01:45.891516 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:01:45 crc kubenswrapper[5010]: E1126 18:01:45.892896 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:01:56 crc kubenswrapper[5010]: I1126 18:01:56.892054 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:01:56 crc kubenswrapper[5010]: E1126 18:01:56.893225 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:02:07 crc kubenswrapper[5010]: I1126 18:02:07.892596 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:02:07 crc kubenswrapper[5010]: E1126 18:02:07.893641 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:02:18 crc kubenswrapper[5010]: I1126 18:02:18.891863 5010 scope.go:117] "RemoveContainer" 
containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:02:18 crc kubenswrapper[5010]: E1126 18:02:18.892835 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:02:31 crc kubenswrapper[5010]: I1126 18:02:31.893543 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:02:31 crc kubenswrapper[5010]: E1126 18:02:31.894437 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:02:34 crc kubenswrapper[5010]: I1126 18:02:34.527564 5010 generic.go:334] "Generic (PLEG): container finished" podID="0f4095f7-6cba-45da-b62e-8e39587d45b0" containerID="ffd5eb48dfceddf66c0934dcaadb092c45517660120051b3a6881bdd5b4269a8" exitCode=0 Nov 26 18:02:34 crc kubenswrapper[5010]: I1126 18:02:34.528686 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" event={"ID":"0f4095f7-6cba-45da-b62e-8e39587d45b0","Type":"ContainerDied","Data":"ffd5eb48dfceddf66c0934dcaadb092c45517660120051b3a6881bdd5b4269a8"} Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.077017 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.204158 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-agent-neutron-config-0\") pod \"0f4095f7-6cba-45da-b62e-8e39587d45b0\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.204631 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbzlm\" (UniqueName: \"kubernetes.io/projected/0f4095f7-6cba-45da-b62e-8e39587d45b0-kube-api-access-dbzlm\") pod \"0f4095f7-6cba-45da-b62e-8e39587d45b0\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.205048 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-inventory\") pod \"0f4095f7-6cba-45da-b62e-8e39587d45b0\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.205090 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-combined-ca-bundle\") pod \"0f4095f7-6cba-45da-b62e-8e39587d45b0\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.205164 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-ssh-key\") pod \"0f4095f7-6cba-45da-b62e-8e39587d45b0\" (UID: \"0f4095f7-6cba-45da-b62e-8e39587d45b0\") " Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.221729 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f4095f7-6cba-45da-b62e-8e39587d45b0-kube-api-access-dbzlm" (OuterVolumeSpecName: "kube-api-access-dbzlm") pod "0f4095f7-6cba-45da-b62e-8e39587d45b0" (UID: "0f4095f7-6cba-45da-b62e-8e39587d45b0"). InnerVolumeSpecName "kube-api-access-dbzlm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.223055 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-combined-ca-bundle" (OuterVolumeSpecName: "neutron-sriov-combined-ca-bundle") pod "0f4095f7-6cba-45da-b62e-8e39587d45b0" (UID: "0f4095f7-6cba-45da-b62e-8e39587d45b0"). InnerVolumeSpecName "neutron-sriov-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.252802 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0f4095f7-6cba-45da-b62e-8e39587d45b0" (UID: "0f4095f7-6cba-45da-b62e-8e39587d45b0"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.256299 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-inventory" (OuterVolumeSpecName: "inventory") pod "0f4095f7-6cba-45da-b62e-8e39587d45b0" (UID: "0f4095f7-6cba-45da-b62e-8e39587d45b0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.260144 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-sriov-agent-neutron-config-0") pod "0f4095f7-6cba-45da-b62e-8e39587d45b0" (UID: "0f4095f7-6cba-45da-b62e-8e39587d45b0"). InnerVolumeSpecName "neutron-sriov-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.309009 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.309056 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbzlm\" (UniqueName: \"kubernetes.io/projected/0f4095f7-6cba-45da-b62e-8e39587d45b0-kube-api-access-dbzlm\") on node \"crc\" DevicePath \"\"" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.309072 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.309087 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-sriov-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-neutron-sriov-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.309097 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0f4095f7-6cba-45da-b62e-8e39587d45b0-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.552767 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" event={"ID":"0f4095f7-6cba-45da-b62e-8e39587d45b0","Type":"ContainerDied","Data":"f87d38e0e481f0aa30619766cb40161d518762ed69fcc2c87bd1a92bd15f2d03"} Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.552841 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f87d38e0e481f0aa30619766cb40161d518762ed69fcc2c87bd1a92bd15f2d03" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.552865 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-sriov-openstack-openstack-cell1-qcxvr" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.769630 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-br7dh"] Nov 26 18:02:36 crc kubenswrapper[5010]: E1126 18:02:36.770479 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f4095f7-6cba-45da-b62e-8e39587d45b0" containerName="neutron-sriov-openstack-openstack-cell1" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.770515 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f4095f7-6cba-45da-b62e-8e39587d45b0" containerName="neutron-sriov-openstack-openstack-cell1" Nov 26 18:02:36 crc kubenswrapper[5010]: E1126 18:02:36.770550 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7944fe1e-8e94-4f90-b1de-984ae9b16948" containerName="keystone-cron" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.770564 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="7944fe1e-8e94-4f90-b1de-984ae9b16948" containerName="keystone-cron" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.771066 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="7944fe1e-8e94-4f90-b1de-984ae9b16948" containerName="keystone-cron" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.771165 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f4095f7-6cba-45da-b62e-8e39587d45b0" containerName="neutron-sriov-openstack-openstack-cell1" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.772567 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.774592 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-dhcp-agent-neutron-config" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.774774 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.775500 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.776073 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.776327 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.784734 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-br7dh"] Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.921587 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.922041 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-ssh-key\") pod 
\"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.922248 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.922427 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbfr4\" (UniqueName: \"kubernetes.io/projected/990bc3e4-a901-447a-b15a-a2fd34d84290-kube-api-access-zbfr4\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:36 crc kubenswrapper[5010]: I1126 18:02:36.922550 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.025042 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.025263 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.025485 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbfr4\" (UniqueName: \"kubernetes.io/projected/990bc3e4-a901-447a-b15a-a2fd34d84290-kube-api-access-zbfr4\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.025600 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.025848 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-agent-neutron-config-0\") pod 
\"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.031215 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-agent-neutron-config-0\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.032677 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-combined-ca-bundle\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.033476 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-inventory\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.041083 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-ssh-key\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.047654 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbfr4\" (UniqueName: \"kubernetes.io/projected/990bc3e4-a901-447a-b15a-a2fd34d84290-kube-api-access-zbfr4\") pod \"neutron-dhcp-openstack-openstack-cell1-br7dh\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.104422 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.728874 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-dhcp-openstack-openstack-cell1-br7dh"] Nov 26 18:02:37 crc kubenswrapper[5010]: I1126 18:02:37.741930 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 18:02:38 crc kubenswrapper[5010]: I1126 18:02:38.572750 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" event={"ID":"990bc3e4-a901-447a-b15a-a2fd34d84290","Type":"ContainerStarted","Data":"1441b70562f52170ec362244efc0a58e516515e3dbc005c991880b68d43e65f2"} Nov 26 18:02:38 crc kubenswrapper[5010]: I1126 18:02:38.573099 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" event={"ID":"990bc3e4-a901-447a-b15a-a2fd34d84290","Type":"ContainerStarted","Data":"ca1db54b74b3ac770c9542505b89096fbdf12a58186323e2b58ddfd11112ba7f"} Nov 26 18:02:38 crc kubenswrapper[5010]: I1126 18:02:38.605653 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" podStartSLOduration=2.150714688 podStartE2EDuration="2.605633051s" podCreationTimestamp="2025-11-26 18:02:36 +0000 UTC" firstStartedPulling="2025-11-26 18:02:37.741591508 +0000 UTC m=+9378.532308666" lastFinishedPulling="2025-11-26 18:02:38.196509871 +0000 UTC m=+9378.987227029" observedRunningTime="2025-11-26 18:02:38.594442162 +0000 UTC m=+9379.385159320" watchObservedRunningTime="2025-11-26 18:02:38.605633051 +0000 UTC m=+9379.396350189" Nov 26 18:02:42 crc kubenswrapper[5010]: I1126 18:02:42.892157 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:02:42 crc kubenswrapper[5010]: E1126 18:02:42.893351 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:02:56 crc kubenswrapper[5010]: I1126 18:02:56.891636 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:02:56 crc kubenswrapper[5010]: E1126 18:02:56.892541 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.079695 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-h46vv"] Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.084642 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.098945 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h46vv"] Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.133625 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4rtrf\" (UniqueName: \"kubernetes.io/projected/f48eb3b1-201b-4741-b62a-3adf301cf26a-kube-api-access-4rtrf\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.133692 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-catalog-content\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.133758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-utilities\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.235981 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4rtrf\" (UniqueName: \"kubernetes.io/projected/f48eb3b1-201b-4741-b62a-3adf301cf26a-kube-api-access-4rtrf\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.236047 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-catalog-content\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.236113 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-utilities\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.236797 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-utilities\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.237382 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-catalog-content\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.267848 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4rtrf\" (UniqueName: \"kubernetes.io/projected/f48eb3b1-201b-4741-b62a-3adf301cf26a-kube-api-access-4rtrf\") pod \"community-operators-h46vv\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.430698 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:04 crc kubenswrapper[5010]: I1126 18:03:04.948527 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-h46vv"] Nov 26 18:03:05 crc kubenswrapper[5010]: I1126 18:03:05.908671 5010 generic.go:334] "Generic (PLEG): container finished" podID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerID="750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5" exitCode=0 Nov 26 18:03:05 crc kubenswrapper[5010]: I1126 18:03:05.908872 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerDied","Data":"750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5"} Nov 26 18:03:05 crc kubenswrapper[5010]: I1126 18:03:05.909099 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerStarted","Data":"bd68c67c89b03883d7a2a8edc433394193c53d8d1bd1cd2b41e6588f0c794d8d"} Nov 26 18:03:08 crc kubenswrapper[5010]: I1126 18:03:08.952419 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerStarted","Data":"d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399"} Nov 26 18:03:09 crc kubenswrapper[5010]: I1126 18:03:09.966782 5010 generic.go:334] "Generic (PLEG): container finished" podID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerID="d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399" exitCode=0 Nov 26 18:03:09 crc kubenswrapper[5010]: I1126 18:03:09.966845 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerDied","Data":"d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399"} Nov 26 18:03:10 crc kubenswrapper[5010]: I1126 18:03:10.891392 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:03:10 crc kubenswrapper[5010]: E1126 18:03:10.891982 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:03:11 crc kubenswrapper[5010]: I1126 18:03:11.989767 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerStarted","Data":"d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f"} Nov 26 18:03:12 crc kubenswrapper[5010]: I1126 18:03:12.032965 5010 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-h46vv" podStartSLOduration=3.03937866 podStartE2EDuration="8.032945079s" podCreationTimestamp="2025-11-26 18:03:04 +0000 UTC" firstStartedPulling="2025-11-26 18:03:05.910915508 +0000 UTC m=+9406.701632656" lastFinishedPulling="2025-11-26 18:03:10.904481927 +0000 UTC m=+9411.695199075" observedRunningTime="2025-11-26 18:03:12.020656662 +0000 UTC m=+9412.811373810" watchObservedRunningTime="2025-11-26 18:03:12.032945079 +0000 UTC m=+9412.823662227" Nov 26 18:03:14 crc kubenswrapper[5010]: I1126 18:03:14.430982 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:14 crc kubenswrapper[5010]: I1126 18:03:14.431827 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:14 crc kubenswrapper[5010]: I1126 18:03:14.528075 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:24 crc kubenswrapper[5010]: I1126 18:03:24.547121 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:24 crc kubenswrapper[5010]: I1126 18:03:24.603687 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h46vv"] Nov 26 18:03:24 crc kubenswrapper[5010]: I1126 18:03:24.891803 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:03:24 crc kubenswrapper[5010]: E1126 18:03:24.892130 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:03:25 crc kubenswrapper[5010]: I1126 18:03:25.517042 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-h46vv" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="registry-server" containerID="cri-o://d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f" gracePeriod=2 Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.065260 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.180537 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4rtrf\" (UniqueName: \"kubernetes.io/projected/f48eb3b1-201b-4741-b62a-3adf301cf26a-kube-api-access-4rtrf\") pod \"f48eb3b1-201b-4741-b62a-3adf301cf26a\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.180832 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-catalog-content\") pod \"f48eb3b1-201b-4741-b62a-3adf301cf26a\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.181027 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-utilities\") pod \"f48eb3b1-201b-4741-b62a-3adf301cf26a\" (UID: \"f48eb3b1-201b-4741-b62a-3adf301cf26a\") " Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.181809 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-utilities" (OuterVolumeSpecName: "utilities") pod "f48eb3b1-201b-4741-b62a-3adf301cf26a" (UID: "f48eb3b1-201b-4741-b62a-3adf301cf26a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.182834 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.199204 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f48eb3b1-201b-4741-b62a-3adf301cf26a-kube-api-access-4rtrf" (OuterVolumeSpecName: "kube-api-access-4rtrf") pod "f48eb3b1-201b-4741-b62a-3adf301cf26a" (UID: "f48eb3b1-201b-4741-b62a-3adf301cf26a"). InnerVolumeSpecName "kube-api-access-4rtrf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.235809 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f48eb3b1-201b-4741-b62a-3adf301cf26a" (UID: "f48eb3b1-201b-4741-b62a-3adf301cf26a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.285063 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4rtrf\" (UniqueName: \"kubernetes.io/projected/f48eb3b1-201b-4741-b62a-3adf301cf26a-kube-api-access-4rtrf\") on node \"crc\" DevicePath \"\"" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.285100 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f48eb3b1-201b-4741-b62a-3adf301cf26a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.528076 5010 generic.go:334] "Generic (PLEG): container finished" podID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerID="d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f" exitCode=0 Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.528117 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerDied","Data":"d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f"} Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.528142 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-h46vv" event={"ID":"f48eb3b1-201b-4741-b62a-3adf301cf26a","Type":"ContainerDied","Data":"bd68c67c89b03883d7a2a8edc433394193c53d8d1bd1cd2b41e6588f0c794d8d"} Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.528157 5010 scope.go:117] "RemoveContainer" containerID="d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.528271 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-h46vv" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.560055 5010 scope.go:117] "RemoveContainer" containerID="d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.568722 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-h46vv"] Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.578223 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-h46vv"] Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.600007 5010 scope.go:117] "RemoveContainer" containerID="750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.662795 5010 scope.go:117] "RemoveContainer" containerID="d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f" Nov 26 18:03:26 crc kubenswrapper[5010]: E1126 18:03:26.663391 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f\": container with ID starting with d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f not found: ID does not exist" containerID="d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.663489 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f"} err="failed to get container status \"d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f\": rpc error: code = NotFound desc = could not find container \"d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f\": container with ID starting with d30141a81e8575f2cc0294f6062eb90c9b70f4069e965ffaf45edaf6378d465f not found: ID does not exist" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.663535 5010 scope.go:117] "RemoveContainer" containerID="d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399" Nov 26 18:03:26 crc kubenswrapper[5010]: E1126 18:03:26.664246 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399\": container with ID starting with d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399 not found: ID does not exist" containerID="d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.664288 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399"} err="failed to get container status \"d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399\": rpc error: code = NotFound desc = could not find container \"d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399\": container with ID starting with d73489710aca936918c4229d81860f9193a1f742fd0bdee7f0ad7447396c3399 not found: ID does not exist" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.664335 5010 scope.go:117] "RemoveContainer" containerID="750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5" Nov 26 18:03:26 crc kubenswrapper[5010]: E1126 18:03:26.664774 5010 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5\": container with ID starting with 750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5 not found: ID does not exist" containerID="750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5" Nov 26 18:03:26 crc kubenswrapper[5010]: I1126 18:03:26.664799 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5"} err="failed to get container status \"750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5\": rpc error: code = NotFound desc = could not find container \"750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5\": container with ID starting with 750daf53fecb40f9e7890252d34fd6dce1059aad3d8d7905cea5a2201b2cf9c5 not found: ID does not exist" Nov 26 18:03:27 crc kubenswrapper[5010]: I1126 18:03:27.906551 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" path="/var/lib/kubelet/pods/f48eb3b1-201b-4741-b62a-3adf301cf26a/volumes" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.799407 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cghs8"] Nov 26 18:03:38 crc kubenswrapper[5010]: E1126 18:03:38.800383 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="registry-server" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.800396 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="registry-server" Nov 26 18:03:38 crc kubenswrapper[5010]: E1126 18:03:38.800422 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="extract-content" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.800428 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="extract-content" Nov 26 18:03:38 crc kubenswrapper[5010]: E1126 18:03:38.800458 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="extract-utilities" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.800464 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="extract-utilities" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.800656 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f48eb3b1-201b-4741-b62a-3adf301cf26a" containerName="registry-server" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.802235 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.803096 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cghs8"] Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.888982 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-catalog-content\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.889192 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwm2z\" (UniqueName: \"kubernetes.io/projected/b1ebbbd3-a5cb-4419-b005-cb7c197688df-kube-api-access-fwm2z\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.889521 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-utilities\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.892861 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:03:38 crc kubenswrapper[5010]: E1126 18:03:38.893192 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.991690 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwm2z\" (UniqueName: \"kubernetes.io/projected/b1ebbbd3-a5cb-4419-b005-cb7c197688df-kube-api-access-fwm2z\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.991889 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-utilities\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.991964 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-catalog-content\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.992701 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-utilities\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:38 crc kubenswrapper[5010]: I1126 18:03:38.994032 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-catalog-content\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:39 crc kubenswrapper[5010]: I1126 18:03:39.014909 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwm2z\" (UniqueName: \"kubernetes.io/projected/b1ebbbd3-a5cb-4419-b005-cb7c197688df-kube-api-access-fwm2z\") pod \"redhat-marketplace-cghs8\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:39 crc kubenswrapper[5010]: I1126 18:03:39.139953 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:39 crc kubenswrapper[5010]: I1126 18:03:39.641499 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cghs8"] Nov 26 18:03:39 crc kubenswrapper[5010]: I1126 18:03:39.706067 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cghs8" event={"ID":"b1ebbbd3-a5cb-4419-b005-cb7c197688df","Type":"ContainerStarted","Data":"3e861baf455e433fff02ca4204d1858e78313f81cb497b1280fdc545c176fa04"} Nov 26 18:03:40 crc kubenswrapper[5010]: I1126 18:03:40.720582 5010 generic.go:334] "Generic (PLEG): container finished" podID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerID="d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207" exitCode=0 Nov 26 18:03:40 crc kubenswrapper[5010]: I1126 18:03:40.720822 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cghs8" event={"ID":"b1ebbbd3-a5cb-4419-b005-cb7c197688df","Type":"ContainerDied","Data":"d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207"} Nov 26 18:03:42 crc kubenswrapper[5010]: I1126 18:03:42.744067 5010 generic.go:334] "Generic (PLEG): container finished" podID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerID="aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1" exitCode=0 Nov 26 18:03:42 crc kubenswrapper[5010]: I1126 18:03:42.744254 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cghs8" event={"ID":"b1ebbbd3-a5cb-4419-b005-cb7c197688df","Type":"ContainerDied","Data":"aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1"} Nov 26 18:03:43 crc kubenswrapper[5010]: I1126 18:03:43.768133 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cghs8" event={"ID":"b1ebbbd3-a5cb-4419-b005-cb7c197688df","Type":"ContainerStarted","Data":"ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781"} Nov 26 18:03:43 crc kubenswrapper[5010]: I1126 18:03:43.807967 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cghs8" podStartSLOduration=3.336148804 podStartE2EDuration="5.80794655s" podCreationTimestamp="2025-11-26 18:03:38 +0000 UTC" firstStartedPulling="2025-11-26 18:03:40.722846699 +0000 UTC m=+9441.513563847" 
lastFinishedPulling="2025-11-26 18:03:43.194644405 +0000 UTC m=+9443.985361593" observedRunningTime="2025-11-26 18:03:43.795759706 +0000 UTC m=+9444.586476904" watchObservedRunningTime="2025-11-26 18:03:43.80794655 +0000 UTC m=+9444.598663708" Nov 26 18:03:49 crc kubenswrapper[5010]: I1126 18:03:49.140356 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:49 crc kubenswrapper[5010]: I1126 18:03:49.141150 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:49 crc kubenswrapper[5010]: I1126 18:03:49.231882 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:49 crc kubenswrapper[5010]: I1126 18:03:49.940699 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:50 crc kubenswrapper[5010]: I1126 18:03:50.007377 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cghs8"] Nov 26 18:03:51 crc kubenswrapper[5010]: I1126 18:03:51.869658 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cghs8" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="registry-server" containerID="cri-o://ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781" gracePeriod=2 Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.431607 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.562001 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-utilities\") pod \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.562249 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-catalog-content\") pod \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.562609 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fwm2z\" (UniqueName: \"kubernetes.io/projected/b1ebbbd3-a5cb-4419-b005-cb7c197688df-kube-api-access-fwm2z\") pod \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\" (UID: \"b1ebbbd3-a5cb-4419-b005-cb7c197688df\") " Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.563201 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-utilities" (OuterVolumeSpecName: "utilities") pod "b1ebbbd3-a5cb-4419-b005-cb7c197688df" (UID: "b1ebbbd3-a5cb-4419-b005-cb7c197688df"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.570665 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1ebbbd3-a5cb-4419-b005-cb7c197688df-kube-api-access-fwm2z" (OuterVolumeSpecName: "kube-api-access-fwm2z") pod "b1ebbbd3-a5cb-4419-b005-cb7c197688df" (UID: "b1ebbbd3-a5cb-4419-b005-cb7c197688df"). InnerVolumeSpecName "kube-api-access-fwm2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.578678 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b1ebbbd3-a5cb-4419-b005-cb7c197688df" (UID: "b1ebbbd3-a5cb-4419-b005-cb7c197688df"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.664897 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fwm2z\" (UniqueName: \"kubernetes.io/projected/b1ebbbd3-a5cb-4419-b005-cb7c197688df-kube-api-access-fwm2z\") on node \"crc\" DevicePath \"\"" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.665338 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.665350 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b1ebbbd3-a5cb-4419-b005-cb7c197688df-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.886231 5010 generic.go:334] "Generic (PLEG): container finished" podID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerID="ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781" exitCode=0 Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.886274 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cghs8" event={"ID":"b1ebbbd3-a5cb-4419-b005-cb7c197688df","Type":"ContainerDied","Data":"ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781"} Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.886304 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cghs8" event={"ID":"b1ebbbd3-a5cb-4419-b005-cb7c197688df","Type":"ContainerDied","Data":"3e861baf455e433fff02ca4204d1858e78313f81cb497b1280fdc545c176fa04"} Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.886322 5010 scope.go:117] "RemoveContainer" containerID="ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.886325 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cghs8" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.891935 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.922814 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cghs8"] Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.932579 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cghs8"] Nov 26 18:03:52 crc kubenswrapper[5010]: I1126 18:03:52.950050 5010 scope.go:117] "RemoveContainer" containerID="aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.006383 5010 scope.go:117] "RemoveContainer" containerID="d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.101025 5010 scope.go:117] "RemoveContainer" containerID="ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781" Nov 26 18:03:53 crc kubenswrapper[5010]: E1126 18:03:53.101724 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781\": container with ID starting with ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781 not found: ID does not exist" containerID="ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.101767 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781"} err="failed to get container status \"ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781\": rpc error: code = NotFound desc = could not find container \"ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781\": container with ID starting with ee18300a58085321185bbcb69856f5fe124f4c0f4b02ba3cd9b263c11163c781 not found: ID does not exist" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.101799 5010 scope.go:117] "RemoveContainer" containerID="aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1" Nov 26 18:03:53 crc kubenswrapper[5010]: E1126 18:03:53.102288 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1\": container with ID starting with aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1 not found: ID does not exist" containerID="aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.102312 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1"} err="failed to get container status \"aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1\": rpc error: code = NotFound desc = could not find container \"aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1\": container with ID starting with aa015c5153dfa69d30e5932ce22af606d4d73b36c9f921f2e169746e464700e1 not found: ID does not exist" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.102324 5010 scope.go:117] "RemoveContainer" 
containerID="d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207" Nov 26 18:03:53 crc kubenswrapper[5010]: E1126 18:03:53.102659 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207\": container with ID starting with d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207 not found: ID does not exist" containerID="d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.102690 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207"} err="failed to get container status \"d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207\": rpc error: code = NotFound desc = could not find container \"d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207\": container with ID starting with d7945e1dd0e5a8d194d9dceea465db6da5126b6461c403e7388d97584acee207 not found: ID does not exist" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.912598 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" path="/var/lib/kubelet/pods/b1ebbbd3-a5cb-4419-b005-cb7c197688df/volumes" Nov 26 18:03:53 crc kubenswrapper[5010]: I1126 18:03:53.919962 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"b5250fe989f5010f7836a2449aac67959322b23a476a4ba6d58843cef5d7d82f"} Nov 26 18:04:55 crc kubenswrapper[5010]: I1126 18:04:55.704182 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="536595b1-5ba9-4588-8e64-32480adb79ea" containerName="galera" probeResult="failure" output="command timed out" Nov 26 18:04:55 crc kubenswrapper[5010]: I1126 18:04:55.709803 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="536595b1-5ba9-4588-8e64-32480adb79ea" containerName="galera" probeResult="failure" output="command timed out" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.766620 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z9kmr"] Nov 26 18:05:05 crc kubenswrapper[5010]: E1126 18:05:05.769381 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="registry-server" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.769536 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="registry-server" Nov 26 18:05:05 crc kubenswrapper[5010]: E1126 18:05:05.769689 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="extract-utilities" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.769839 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="extract-utilities" Nov 26 18:05:05 crc kubenswrapper[5010]: E1126 18:05:05.769983 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="extract-content" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.770094 5010 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="extract-content" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.770576 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1ebbbd3-a5cb-4419-b005-cb7c197688df" containerName="registry-server" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.781966 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.816063 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9kmr"] Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.876241 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-catalog-content\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.876313 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-utilities\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.876391 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcknb\" (UniqueName: \"kubernetes.io/projected/cbee8cbc-2a17-49b5-8595-3211266c3fa6-kube-api-access-mcknb\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.978059 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-utilities\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.978420 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcknb\" (UniqueName: \"kubernetes.io/projected/cbee8cbc-2a17-49b5-8595-3211266c3fa6-kube-api-access-mcknb\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.978676 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-catalog-content\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.979215 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-catalog-content\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 
18:05:05.979725 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-utilities\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:05 crc kubenswrapper[5010]: I1126 18:05:05.996903 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcknb\" (UniqueName: \"kubernetes.io/projected/cbee8cbc-2a17-49b5-8595-3211266c3fa6-kube-api-access-mcknb\") pod \"certified-operators-z9kmr\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:06 crc kubenswrapper[5010]: I1126 18:05:06.105991 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:06 crc kubenswrapper[5010]: I1126 18:05:06.635902 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z9kmr"] Nov 26 18:05:06 crc kubenswrapper[5010]: I1126 18:05:06.892342 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerStarted","Data":"7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73"} Nov 26 18:05:06 crc kubenswrapper[5010]: I1126 18:05:06.892647 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerStarted","Data":"e698e07b4c7687c75e09360a0929fcc05834aa75d958daa642cd9a5620cb22d7"} Nov 26 18:05:07 crc kubenswrapper[5010]: I1126 18:05:07.910181 5010 generic.go:334] "Generic (PLEG): container finished" podID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerID="7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73" exitCode=0 Nov 26 18:05:07 crc kubenswrapper[5010]: I1126 18:05:07.910258 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerDied","Data":"7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73"} Nov 26 18:05:07 crc kubenswrapper[5010]: I1126 18:05:07.910555 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerStarted","Data":"36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8"} Nov 26 18:05:08 crc kubenswrapper[5010]: I1126 18:05:08.925589 5010 generic.go:334] "Generic (PLEG): container finished" podID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerID="36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8" exitCode=0 Nov 26 18:05:08 crc kubenswrapper[5010]: I1126 18:05:08.925829 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerDied","Data":"36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8"} Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.598268 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-d9npr"] Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.609531 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.620717 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-d9npr"] Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.671398 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-utilities\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.671524 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7v4f7\" (UniqueName: \"kubernetes.io/projected/13180470-69ca-4cf3-8790-e191b60ec55a-kube-api-access-7v4f7\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.671926 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-catalog-content\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.773900 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-utilities\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.774409 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-utilities\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.774552 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7v4f7\" (UniqueName: \"kubernetes.io/projected/13180470-69ca-4cf3-8790-e191b60ec55a-kube-api-access-7v4f7\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.774969 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-catalog-content\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.775014 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-catalog-content\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.803288 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-7v4f7\" (UniqueName: \"kubernetes.io/projected/13180470-69ca-4cf3-8790-e191b60ec55a-kube-api-access-7v4f7\") pod \"redhat-operators-d9npr\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.937675 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerStarted","Data":"a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f"} Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.939117 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:09 crc kubenswrapper[5010]: I1126 18:05:09.966657 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z9kmr" podStartSLOduration=2.46036922 podStartE2EDuration="4.966635156s" podCreationTimestamp="2025-11-26 18:05:05 +0000 UTC" firstStartedPulling="2025-11-26 18:05:06.894700883 +0000 UTC m=+9527.685418031" lastFinishedPulling="2025-11-26 18:05:09.400966779 +0000 UTC m=+9530.191683967" observedRunningTime="2025-11-26 18:05:09.956187565 +0000 UTC m=+9530.746904723" watchObservedRunningTime="2025-11-26 18:05:09.966635156 +0000 UTC m=+9530.757352304" Nov 26 18:05:10 crc kubenswrapper[5010]: I1126 18:05:10.424739 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-d9npr"] Nov 26 18:05:10 crc kubenswrapper[5010]: I1126 18:05:10.980656 5010 generic.go:334] "Generic (PLEG): container finished" podID="13180470-69ca-4cf3-8790-e191b60ec55a" containerID="248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65" exitCode=0 Nov 26 18:05:10 crc kubenswrapper[5010]: I1126 18:05:10.982372 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerDied","Data":"248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65"} Nov 26 18:05:10 crc kubenswrapper[5010]: I1126 18:05:10.982417 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerStarted","Data":"96a2869cfd222bf402566b7376e3b3f300c42144d862d6039006644bb714f3a5"} Nov 26 18:05:11 crc kubenswrapper[5010]: I1126 18:05:11.993913 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerStarted","Data":"58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f"} Nov 26 18:05:14 crc kubenswrapper[5010]: I1126 18:05:14.014304 5010 generic.go:334] "Generic (PLEG): container finished" podID="13180470-69ca-4cf3-8790-e191b60ec55a" containerID="58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f" exitCode=0 Nov 26 18:05:14 crc kubenswrapper[5010]: I1126 18:05:14.014385 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerDied","Data":"58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f"} Nov 26 18:05:15 crc kubenswrapper[5010]: I1126 18:05:15.028871 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" 
event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerStarted","Data":"c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78"} Nov 26 18:05:15 crc kubenswrapper[5010]: I1126 18:05:15.060951 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-d9npr" podStartSLOduration=2.344982353 podStartE2EDuration="6.060926338s" podCreationTimestamp="2025-11-26 18:05:09 +0000 UTC" firstStartedPulling="2025-11-26 18:05:10.986309202 +0000 UTC m=+9531.777026350" lastFinishedPulling="2025-11-26 18:05:14.702253157 +0000 UTC m=+9535.492970335" observedRunningTime="2025-11-26 18:05:15.058404255 +0000 UTC m=+9535.849121423" watchObservedRunningTime="2025-11-26 18:05:15.060926338 +0000 UTC m=+9535.851643536" Nov 26 18:05:16 crc kubenswrapper[5010]: I1126 18:05:16.106894 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:16 crc kubenswrapper[5010]: I1126 18:05:16.107858 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:16 crc kubenswrapper[5010]: I1126 18:05:16.983580 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:17 crc kubenswrapper[5010]: I1126 18:05:17.125194 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:18 crc kubenswrapper[5010]: I1126 18:05:18.318404 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9kmr"] Nov 26 18:05:19 crc kubenswrapper[5010]: I1126 18:05:19.939247 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:19 crc kubenswrapper[5010]: I1126 18:05:19.939615 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.083390 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z9kmr" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="registry-server" containerID="cri-o://a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f" gracePeriod=2 Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.689324 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.746889 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcknb\" (UniqueName: \"kubernetes.io/projected/cbee8cbc-2a17-49b5-8595-3211266c3fa6-kube-api-access-mcknb\") pod \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.747333 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-utilities\") pod \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.747467 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-catalog-content\") pod \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\" (UID: \"cbee8cbc-2a17-49b5-8595-3211266c3fa6\") " Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.748302 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-utilities" (OuterVolumeSpecName: "utilities") pod "cbee8cbc-2a17-49b5-8595-3211266c3fa6" (UID: "cbee8cbc-2a17-49b5-8595-3211266c3fa6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.763461 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbee8cbc-2a17-49b5-8595-3211266c3fa6-kube-api-access-mcknb" (OuterVolumeSpecName: "kube-api-access-mcknb") pod "cbee8cbc-2a17-49b5-8595-3211266c3fa6" (UID: "cbee8cbc-2a17-49b5-8595-3211266c3fa6"). InnerVolumeSpecName "kube-api-access-mcknb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.830052 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cbee8cbc-2a17-49b5-8595-3211266c3fa6" (UID: "cbee8cbc-2a17-49b5-8595-3211266c3fa6"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.851115 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.851169 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cbee8cbc-2a17-49b5-8595-3211266c3fa6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.851189 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcknb\" (UniqueName: \"kubernetes.io/projected/cbee8cbc-2a17-49b5-8595-3211266c3fa6-kube-api-access-mcknb\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:20 crc kubenswrapper[5010]: I1126 18:05:20.989981 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-d9npr" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="registry-server" probeResult="failure" output=< Nov 26 18:05:20 crc kubenswrapper[5010]: timeout: failed to connect service ":50051" within 1s Nov 26 18:05:20 crc kubenswrapper[5010]: > Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.095799 5010 generic.go:334] "Generic (PLEG): container finished" podID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerID="a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f" exitCode=0 Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.095859 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z9kmr" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.096335 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerDied","Data":"a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f"} Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.096473 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z9kmr" event={"ID":"cbee8cbc-2a17-49b5-8595-3211266c3fa6","Type":"ContainerDied","Data":"e698e07b4c7687c75e09360a0929fcc05834aa75d958daa642cd9a5620cb22d7"} Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.096544 5010 scope.go:117] "RemoveContainer" containerID="a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.127138 5010 scope.go:117] "RemoveContainer" containerID="36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.133879 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z9kmr"] Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.144983 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z9kmr"] Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.157468 5010 scope.go:117] "RemoveContainer" containerID="7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.201477 5010 scope.go:117] "RemoveContainer" containerID="a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f" Nov 26 18:05:21 crc kubenswrapper[5010]: E1126 18:05:21.202041 5010 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f\": container with ID starting with a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f not found: ID does not exist" containerID="a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.202101 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f"} err="failed to get container status \"a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f\": rpc error: code = NotFound desc = could not find container \"a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f\": container with ID starting with a36ae1bd359494cb6c550f8f08e5bd8dd078b3eddad269364e6746dd0cfd8f9f not found: ID does not exist" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.202135 5010 scope.go:117] "RemoveContainer" containerID="36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8" Nov 26 18:05:21 crc kubenswrapper[5010]: E1126 18:05:21.202490 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8\": container with ID starting with 36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8 not found: ID does not exist" containerID="36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.202540 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8"} err="failed to get container status \"36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8\": rpc error: code = NotFound desc = could not find container \"36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8\": container with ID starting with 36e67d2520121f06c20c930cfcd7c276ba903221b59b5a4e9ea1d64b9f71a6f8 not found: ID does not exist" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.202574 5010 scope.go:117] "RemoveContainer" containerID="7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73" Nov 26 18:05:21 crc kubenswrapper[5010]: E1126 18:05:21.202902 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73\": container with ID starting with 7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73 not found: ID does not exist" containerID="7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.202930 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73"} err="failed to get container status \"7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73\": rpc error: code = NotFound desc = could not find container \"7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73\": container with ID starting with 7b3655a955e6f60870301cd62e25c04d3925cededf1457ba21df1a093f33af73 not found: ID does not exist" Nov 26 18:05:21 crc kubenswrapper[5010]: I1126 18:05:21.914870 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" path="/var/lib/kubelet/pods/cbee8cbc-2a17-49b5-8595-3211266c3fa6/volumes" Nov 26 18:05:30 crc kubenswrapper[5010]: I1126 18:05:30.011111 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:30 crc kubenswrapper[5010]: I1126 18:05:30.079164 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:30 crc kubenswrapper[5010]: I1126 18:05:30.262959 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-d9npr"] Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.213516 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-d9npr" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="registry-server" containerID="cri-o://c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78" gracePeriod=2 Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.776669 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.823916 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-catalog-content\") pod \"13180470-69ca-4cf3-8790-e191b60ec55a\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.824364 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-utilities\") pod \"13180470-69ca-4cf3-8790-e191b60ec55a\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.825541 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-utilities" (OuterVolumeSpecName: "utilities") pod "13180470-69ca-4cf3-8790-e191b60ec55a" (UID: "13180470-69ca-4cf3-8790-e191b60ec55a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.919473 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "13180470-69ca-4cf3-8790-e191b60ec55a" (UID: "13180470-69ca-4cf3-8790-e191b60ec55a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.926480 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7v4f7\" (UniqueName: \"kubernetes.io/projected/13180470-69ca-4cf3-8790-e191b60ec55a-kube-api-access-7v4f7\") pod \"13180470-69ca-4cf3-8790-e191b60ec55a\" (UID: \"13180470-69ca-4cf3-8790-e191b60ec55a\") " Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.927863 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.927891 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/13180470-69ca-4cf3-8790-e191b60ec55a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:31 crc kubenswrapper[5010]: I1126 18:05:31.940064 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13180470-69ca-4cf3-8790-e191b60ec55a-kube-api-access-7v4f7" (OuterVolumeSpecName: "kube-api-access-7v4f7") pod "13180470-69ca-4cf3-8790-e191b60ec55a" (UID: "13180470-69ca-4cf3-8790-e191b60ec55a"). InnerVolumeSpecName "kube-api-access-7v4f7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.028984 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7v4f7\" (UniqueName: \"kubernetes.io/projected/13180470-69ca-4cf3-8790-e191b60ec55a-kube-api-access-7v4f7\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.230341 5010 generic.go:334] "Generic (PLEG): container finished" podID="13180470-69ca-4cf3-8790-e191b60ec55a" containerID="c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78" exitCode=0 Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.230425 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerDied","Data":"c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78"} Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.230457 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-d9npr" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.230490 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-d9npr" event={"ID":"13180470-69ca-4cf3-8790-e191b60ec55a","Type":"ContainerDied","Data":"96a2869cfd222bf402566b7376e3b3f300c42144d862d6039006644bb714f3a5"} Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.230529 5010 scope.go:117] "RemoveContainer" containerID="c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.265949 5010 scope.go:117] "RemoveContainer" containerID="58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.296811 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-d9npr"] Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.304636 5010 scope.go:117] "RemoveContainer" containerID="248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.308047 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-d9npr"] Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.376446 5010 scope.go:117] "RemoveContainer" containerID="c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78" Nov 26 18:05:32 crc kubenswrapper[5010]: E1126 18:05:32.376952 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78\": container with ID starting with c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78 not found: ID does not exist" containerID="c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.377083 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78"} err="failed to get container status \"c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78\": rpc error: code = NotFound desc = could not find container \"c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78\": container with ID starting with c6d111f435616f3647a983be11a7a21de7d51ed9f7b8691b6706644d736bcd78 not found: ID does not exist" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.377175 5010 scope.go:117] "RemoveContainer" containerID="58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f" Nov 26 18:05:32 crc kubenswrapper[5010]: E1126 18:05:32.377652 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f\": container with ID starting with 58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f not found: ID does not exist" containerID="58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.377684 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f"} err="failed to get container status \"58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f\": rpc error: code = NotFound desc = could not find container 
\"58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f\": container with ID starting with 58121bfdc084379d82c3cf9e48f1504800d61d33d872d5054f195a07e708090f not found: ID does not exist" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.377721 5010 scope.go:117] "RemoveContainer" containerID="248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65" Nov 26 18:05:32 crc kubenswrapper[5010]: E1126 18:05:32.377988 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65\": container with ID starting with 248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65 not found: ID does not exist" containerID="248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65" Nov 26 18:05:32 crc kubenswrapper[5010]: I1126 18:05:32.378100 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65"} err="failed to get container status \"248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65\": rpc error: code = NotFound desc = could not find container \"248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65\": container with ID starting with 248ef191f07a8c28827af27a7a96b4f654a7db8427a33fe0ba6176201d81ba65 not found: ID does not exist" Nov 26 18:05:33 crc kubenswrapper[5010]: I1126 18:05:33.908982 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" path="/var/lib/kubelet/pods/13180470-69ca-4cf3-8790-e191b60ec55a/volumes" Nov 26 18:05:49 crc kubenswrapper[5010]: I1126 18:05:49.474744 5010 generic.go:334] "Generic (PLEG): container finished" podID="990bc3e4-a901-447a-b15a-a2fd34d84290" containerID="1441b70562f52170ec362244efc0a58e516515e3dbc005c991880b68d43e65f2" exitCode=0 Nov 26 18:05:49 crc kubenswrapper[5010]: I1126 18:05:49.475269 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" event={"ID":"990bc3e4-a901-447a-b15a-a2fd34d84290","Type":"ContainerDied","Data":"1441b70562f52170ec362244efc0a58e516515e3dbc005c991880b68d43e65f2"} Nov 26 18:05:50 crc kubenswrapper[5010]: I1126 18:05:50.979850 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.104570 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-agent-neutron-config-0\") pod \"990bc3e4-a901-447a-b15a-a2fd34d84290\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.104737 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbfr4\" (UniqueName: \"kubernetes.io/projected/990bc3e4-a901-447a-b15a-a2fd34d84290-kube-api-access-zbfr4\") pod \"990bc3e4-a901-447a-b15a-a2fd34d84290\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.104767 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-inventory\") pod \"990bc3e4-a901-447a-b15a-a2fd34d84290\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.104896 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-combined-ca-bundle\") pod \"990bc3e4-a901-447a-b15a-a2fd34d84290\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.104983 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-ssh-key\") pod \"990bc3e4-a901-447a-b15a-a2fd34d84290\" (UID: \"990bc3e4-a901-447a-b15a-a2fd34d84290\") " Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.111551 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/990bc3e4-a901-447a-b15a-a2fd34d84290-kube-api-access-zbfr4" (OuterVolumeSpecName: "kube-api-access-zbfr4") pod "990bc3e4-a901-447a-b15a-a2fd34d84290" (UID: "990bc3e4-a901-447a-b15a-a2fd34d84290"). InnerVolumeSpecName "kube-api-access-zbfr4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.112279 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-combined-ca-bundle" (OuterVolumeSpecName: "neutron-dhcp-combined-ca-bundle") pod "990bc3e4-a901-447a-b15a-a2fd34d84290" (UID: "990bc3e4-a901-447a-b15a-a2fd34d84290"). InnerVolumeSpecName "neutron-dhcp-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.145026 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-inventory" (OuterVolumeSpecName: "inventory") pod "990bc3e4-a901-447a-b15a-a2fd34d84290" (UID: "990bc3e4-a901-447a-b15a-a2fd34d84290"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.151540 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "990bc3e4-a901-447a-b15a-a2fd34d84290" (UID: "990bc3e4-a901-447a-b15a-a2fd34d84290"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.154586 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-dhcp-agent-neutron-config-0") pod "990bc3e4-a901-447a-b15a-a2fd34d84290" (UID: "990bc3e4-a901-447a-b15a-a2fd34d84290"). InnerVolumeSpecName "neutron-dhcp-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.210309 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.210347 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.210366 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbfr4\" (UniqueName: \"kubernetes.io/projected/990bc3e4-a901-447a-b15a-a2fd34d84290-kube-api-access-zbfr4\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.210380 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.210393 5010 reconciler_common.go:293] "Volume detached for volume \"neutron-dhcp-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/990bc3e4-a901-447a-b15a-a2fd34d84290-neutron-dhcp-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.502901 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" event={"ID":"990bc3e4-a901-447a-b15a-a2fd34d84290","Type":"ContainerDied","Data":"ca1db54b74b3ac770c9542505b89096fbdf12a58186323e2b58ddfd11112ba7f"} Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.502946 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca1db54b74b3ac770c9542505b89096fbdf12a58186323e2b58ddfd11112ba7f" Nov 26 18:05:51 crc kubenswrapper[5010]: I1126 18:05:51.502986 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-dhcp-openstack-openstack-cell1-br7dh" Nov 26 18:06:11 crc kubenswrapper[5010]: I1126 18:06:11.423247 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:06:11 crc kubenswrapper[5010]: I1126 18:06:11.423877 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:06:20 crc kubenswrapper[5010]: I1126 18:06:20.581198 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 18:06:20 crc kubenswrapper[5010]: I1126 18:06:20.582015 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="47e9d252-9ba4-4d0d-9376-5e55278708b6" containerName="nova-cell0-conductor-conductor" containerID="cri-o://c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" gracePeriod=30 Nov 26 18:06:20 crc kubenswrapper[5010]: I1126 18:06:20.641789 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 18:06:20 crc kubenswrapper[5010]: I1126 18:06:20.642297 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="bf0e2af8-bfad-41a1-af7e-5df8046f4c51" containerName="nova-cell1-conductor-conductor" containerID="cri-o://33524c69d30acdf21bdae789527d997f331d4e645bc459959ee72f3858610521" gracePeriod=30 Nov 26 18:06:20 crc kubenswrapper[5010]: E1126 18:06:20.671393 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 18:06:20 crc kubenswrapper[5010]: E1126 18:06:20.672910 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 18:06:20 crc kubenswrapper[5010]: E1126 18:06:20.674686 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Nov 26 18:06:20 crc kubenswrapper[5010]: E1126 18:06:20.674764 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="47e9d252-9ba4-4d0d-9376-5e55278708b6" containerName="nova-cell0-conductor-conductor" Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.362748 5010 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.363314 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" containerName="nova-scheduler-scheduler" containerID="cri-o://86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" gracePeriod=30 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.383559 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.383854 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-log" containerID="cri-o://6708c1e4535a7b8f472b85b0cbb474bc683659dd9f4915f1d64fa4e939504f05" gracePeriod=30 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.383949 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-api" containerID="cri-o://1294c1a84215055821e00cdaa63179c1e469c4adae2aadcbb6d8a0fbdbf15af3" gracePeriod=30 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.440594 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.440854 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-log" containerID="cri-o://0d5eb9baf703c29ae969ea38e5dab0f1636b77b82498c3e1917f7088fde488dd" gracePeriod=30 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.440986 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-metadata" containerID="cri-o://3479b37319a03cc7eb1fcfe947294c65c254f47c77a5bd2d2b976a8aeb25b40f" gracePeriod=30 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.909087 5010 generic.go:334] "Generic (PLEG): container finished" podID="bf0e2af8-bfad-41a1-af7e-5df8046f4c51" containerID="33524c69d30acdf21bdae789527d997f331d4e645bc459959ee72f3858610521" exitCode=0 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.912345 5010 generic.go:334] "Generic (PLEG): container finished" podID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerID="6708c1e4535a7b8f472b85b0cbb474bc683659dd9f4915f1d64fa4e939504f05" exitCode=143 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.915277 5010 generic.go:334] "Generic (PLEG): container finished" podID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerID="0d5eb9baf703c29ae969ea38e5dab0f1636b77b82498c3e1917f7088fde488dd" exitCode=143 Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.915918 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"bf0e2af8-bfad-41a1-af7e-5df8046f4c51","Type":"ContainerDied","Data":"33524c69d30acdf21bdae789527d997f331d4e645bc459959ee72f3858610521"} Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.915952 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d552405f-057e-416b-9540-bf0f0f0d2b7b","Type":"ContainerDied","Data":"6708c1e4535a7b8f472b85b0cbb474bc683659dd9f4915f1d64fa4e939504f05"} Nov 26 18:06:21 crc kubenswrapper[5010]: I1126 18:06:21.915967 
5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"27b7d05d-503e-4417-abc0-4c9b58aec030","Type":"ContainerDied","Data":"0d5eb9baf703c29ae969ea38e5dab0f1636b77b82498c3e1917f7088fde488dd"} Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.143855 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.214786 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qdbbh\" (UniqueName: \"kubernetes.io/projected/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-kube-api-access-qdbbh\") pod \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.215170 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-combined-ca-bundle\") pod \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.215885 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-config-data\") pod \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\" (UID: \"bf0e2af8-bfad-41a1-af7e-5df8046f4c51\") " Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.223291 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-kube-api-access-qdbbh" (OuterVolumeSpecName: "kube-api-access-qdbbh") pod "bf0e2af8-bfad-41a1-af7e-5df8046f4c51" (UID: "bf0e2af8-bfad-41a1-af7e-5df8046f4c51"). InnerVolumeSpecName "kube-api-access-qdbbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.264414 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-config-data" (OuterVolumeSpecName: "config-data") pod "bf0e2af8-bfad-41a1-af7e-5df8046f4c51" (UID: "bf0e2af8-bfad-41a1-af7e-5df8046f4c51"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.288687 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bf0e2af8-bfad-41a1-af7e-5df8046f4c51" (UID: "bf0e2af8-bfad-41a1-af7e-5df8046f4c51"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.321321 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.321359 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.321368 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qdbbh\" (UniqueName: \"kubernetes.io/projected/bf0e2af8-bfad-41a1-af7e-5df8046f4c51-kube-api-access-qdbbh\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:22 crc kubenswrapper[5010]: E1126 18:06:22.665133 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 18:06:22 crc kubenswrapper[5010]: E1126 18:06:22.666293 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 18:06:22 crc kubenswrapper[5010]: E1126 18:06:22.668253 5010 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 26 18:06:22 crc kubenswrapper[5010]: E1126 18:06:22.668286 5010 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" containerName="nova-scheduler-scheduler" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.928094 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"bf0e2af8-bfad-41a1-af7e-5df8046f4c51","Type":"ContainerDied","Data":"dc9e42b2b81090a123f96dc061204561bfc9e024f877f533d6318ca2ef9401c3"} Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.928181 5010 scope.go:117] "RemoveContainer" containerID="33524c69d30acdf21bdae789527d997f331d4e645bc459959ee72f3858610521" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.928388 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:22 crc kubenswrapper[5010]: I1126 18:06:22.999549 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.015582 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.026427 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027168 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="990bc3e4-a901-447a-b15a-a2fd34d84290" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027200 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="990bc3e4-a901-447a-b15a-a2fd34d84290" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027236 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="extract-content" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027250 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="extract-content" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027286 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="registry-server" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027319 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="registry-server" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027344 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="extract-content" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027360 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="extract-content" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027391 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="extract-utilities" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027406 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="extract-utilities" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027435 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="registry-server" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027449 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="registry-server" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027474 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf0e2af8-bfad-41a1-af7e-5df8046f4c51" containerName="nova-cell1-conductor-conductor" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027488 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf0e2af8-bfad-41a1-af7e-5df8046f4c51" containerName="nova-cell1-conductor-conductor" Nov 26 18:06:23 crc kubenswrapper[5010]: E1126 18:06:23.027516 5010 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="extract-utilities" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027527 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="extract-utilities" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027888 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="990bc3e4-a901-447a-b15a-a2fd34d84290" containerName="neutron-dhcp-openstack-openstack-cell1" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027914 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf0e2af8-bfad-41a1-af7e-5df8046f4c51" containerName="nova-cell1-conductor-conductor" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027951 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbee8cbc-2a17-49b5-8595-3211266c3fa6" containerName="registry-server" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.027979 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="13180470-69ca-4cf3-8790-e191b60ec55a" containerName="registry-server" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.029269 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.032258 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.035917 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.138532 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c175dd6-451e-4a91-8aea-f46e31c375a6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.138911 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqpxp\" (UniqueName: \"kubernetes.io/projected/5c175dd6-451e-4a91-8aea-f46e31c375a6-kube-api-access-qqpxp\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.139026 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c175dd6-451e-4a91-8aea-f46e31c375a6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.241842 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c175dd6-451e-4a91-8aea-f46e31c375a6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.242118 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqpxp\" (UniqueName: \"kubernetes.io/projected/5c175dd6-451e-4a91-8aea-f46e31c375a6-kube-api-access-qqpxp\") pod \"nova-cell1-conductor-0\" (UID: 
\"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.242306 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c175dd6-451e-4a91-8aea-f46e31c375a6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.610943 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqpxp\" (UniqueName: \"kubernetes.io/projected/5c175dd6-451e-4a91-8aea-f46e31c375a6-kube-api-access-qqpxp\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.611068 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c175dd6-451e-4a91-8aea-f46e31c375a6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.611472 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c175dd6-451e-4a91-8aea-f46e31c375a6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"5c175dd6-451e-4a91-8aea-f46e31c375a6\") " pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.660013 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:23 crc kubenswrapper[5010]: I1126 18:06:23.908373 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf0e2af8-bfad-41a1-af7e-5df8046f4c51" path="/var/lib/kubelet/pods/bf0e2af8-bfad-41a1-af7e-5df8046f4c51/volumes" Nov 26 18:06:24 crc kubenswrapper[5010]: I1126 18:06:24.175603 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.133554 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5c175dd6-451e-4a91-8aea-f46e31c375a6","Type":"ContainerStarted","Data":"0d7d1ce69aae5de913c5f149ac836a232f4db1da825701c2e044d8760e658741"} Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.134196 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"5c175dd6-451e-4a91-8aea-f46e31c375a6","Type":"ContainerStarted","Data":"b99a5405bd02375c4a2b598341cbfd10c030607982f8b72083f87976b0f834b3"} Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.135620 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.179462 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=3.179423038 podStartE2EDuration="3.179423038s" podCreationTimestamp="2025-11-26 18:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:06:25.162348112 +0000 UTC m=+9605.953065270" watchObservedRunningTime="2025-11-26 18:06:25.179423038 +0000 UTC m=+9605.970140226" Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 
18:06:25.608517 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.813738 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-combined-ca-bundle\") pod \"47e9d252-9ba4-4d0d-9376-5e55278708b6\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.814088 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-config-data\") pod \"47e9d252-9ba4-4d0d-9376-5e55278708b6\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " Nov 26 18:06:25 crc kubenswrapper[5010]: I1126 18:06:25.814111 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjtr2\" (UniqueName: \"kubernetes.io/projected/47e9d252-9ba4-4d0d-9376-5e55278708b6-kube-api-access-zjtr2\") pod \"47e9d252-9ba4-4d0d-9376-5e55278708b6\" (UID: \"47e9d252-9ba4-4d0d-9376-5e55278708b6\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.153805 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"47e9d252-9ba4-4d0d-9376-5e55278708b6","Type":"ContainerDied","Data":"c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de"} Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.153771 5010 generic.go:334] "Generic (PLEG): container finished" podID="47e9d252-9ba4-4d0d-9376-5e55278708b6" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" exitCode=0 Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.154242 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.154256 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"47e9d252-9ba4-4d0d-9376-5e55278708b6","Type":"ContainerDied","Data":"162868e448866d13d79d9cd534fe3d245caa9e03bcc08032c6856292f68dea27"} Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.154299 5010 scope.go:117] "RemoveContainer" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.159815 5010 generic.go:334] "Generic (PLEG): container finished" podID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerID="1294c1a84215055821e00cdaa63179c1e469c4adae2aadcbb6d8a0fbdbf15af3" exitCode=0 Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.159895 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d552405f-057e-416b-9540-bf0f0f0d2b7b","Type":"ContainerDied","Data":"1294c1a84215055821e00cdaa63179c1e469c4adae2aadcbb6d8a0fbdbf15af3"} Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.162139 5010 generic.go:334] "Generic (PLEG): container finished" podID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerID="3479b37319a03cc7eb1fcfe947294c65c254f47c77a5bd2d2b976a8aeb25b40f" exitCode=0 Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.163172 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"27b7d05d-503e-4417-abc0-4c9b58aec030","Type":"ContainerDied","Data":"3479b37319a03cc7eb1fcfe947294c65c254f47c77a5bd2d2b976a8aeb25b40f"} Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.424306 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47e9d252-9ba4-4d0d-9376-5e55278708b6-kube-api-access-zjtr2" (OuterVolumeSpecName: "kube-api-access-zjtr2") pod "47e9d252-9ba4-4d0d-9376-5e55278708b6" (UID: "47e9d252-9ba4-4d0d-9376-5e55278708b6"). InnerVolumeSpecName "kube-api-access-zjtr2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.434311 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjtr2\" (UniqueName: \"kubernetes.io/projected/47e9d252-9ba4-4d0d-9376-5e55278708b6-kube-api-access-zjtr2\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.482072 5010 scope.go:117] "RemoveContainer" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" Nov 26 18:06:26 crc kubenswrapper[5010]: E1126 18:06:26.483089 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de\": container with ID starting with c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de not found: ID does not exist" containerID="c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.483129 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de"} err="failed to get container status \"c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de\": rpc error: code = NotFound desc = could not find container \"c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de\": container with ID starting with c7614db6c2814f18113ac76b4f3f739eba61760b2c0c85af721b2283e85915de not found: ID does not exist" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.499368 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb"] Nov 26 18:06:26 crc kubenswrapper[5010]: E1126 18:06:26.499931 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e9d252-9ba4-4d0d-9376-5e55278708b6" containerName="nova-cell0-conductor-conductor" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.499949 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e9d252-9ba4-4d0d-9376-5e55278708b6" containerName="nova-cell0-conductor-conductor" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.500165 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="47e9d252-9ba4-4d0d-9376-5e55278708b6" containerName="nova-cell0-conductor-conductor" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.500949 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.514150 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb"] Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.520383 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-cells-global-config" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.520379 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-adoption-secret" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.520568 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.521173 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-dockercfg-4zsrg" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.521320 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.521466 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.523015 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-cell1" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.567191 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47e9d252-9ba4-4d0d-9376-5e55278708b6" (UID: "47e9d252-9ba4-4d0d-9376-5e55278708b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.581906 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-config-data" (OuterVolumeSpecName: "config-data") pod "47e9d252-9ba4-4d0d-9376-5e55278708b6" (UID: "47e9d252-9ba4-4d0d-9376-5e55278708b6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.638339 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.638391 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.638704 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.638760 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.638873 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.638895 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.639101 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6v878\" (UniqueName: \"kubernetes.io/projected/ba894d64-c3e3-4595-a376-bfdc8429afca-kube-api-access-6v878\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 
18:06:26.639295 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.639345 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.639467 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.639505 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47e9d252-9ba4-4d0d-9376-5e55278708b6-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.716367 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.726856 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743315 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743582 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743664 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743683 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-ssh-key\") pod 
\"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743812 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6v878\" (UniqueName: \"kubernetes.io/projected/ba894d64-c3e3-4595-a376-bfdc8429afca-kube-api-access-6v878\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743966 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.743990 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.744219 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.744289 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.746536 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cells-global-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.753142 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 
18:06:26.753977 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-inventory\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.757427 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.759410 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-1\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.766619 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-combined-ca-bundle\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.774088 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-0\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.800343 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-ssh-key\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.805002 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6v878\" (UniqueName: \"kubernetes.io/projected/ba894d64-c3e3-4595-a376-bfdc8429afca-kube-api-access-6v878\") pod \"nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845204 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-combined-ca-bundle\") pod \"d552405f-057e-416b-9540-bf0f0f0d2b7b\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " Nov 26 
18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845369 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvb8q\" (UniqueName: \"kubernetes.io/projected/27b7d05d-503e-4417-abc0-4c9b58aec030-kube-api-access-zvb8q\") pod \"27b7d05d-503e-4417-abc0-4c9b58aec030\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845448 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-config-data\") pod \"d552405f-057e-416b-9540-bf0f0f0d2b7b\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845476 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkdcm\" (UniqueName: \"kubernetes.io/projected/d552405f-057e-416b-9540-bf0f0f0d2b7b-kube-api-access-qkdcm\") pod \"d552405f-057e-416b-9540-bf0f0f0d2b7b\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845491 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-internal-tls-certs\") pod \"d552405f-057e-416b-9540-bf0f0f0d2b7b\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845542 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-public-tls-certs\") pod \"d552405f-057e-416b-9540-bf0f0f0d2b7b\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845584 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27b7d05d-503e-4417-abc0-4c9b58aec030-logs\") pod \"27b7d05d-503e-4417-abc0-4c9b58aec030\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845601 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-combined-ca-bundle\") pod \"27b7d05d-503e-4417-abc0-4c9b58aec030\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845740 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-config-data\") pod \"27b7d05d-503e-4417-abc0-4c9b58aec030\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845757 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d552405f-057e-416b-9540-bf0f0f0d2b7b-logs\") pod \"d552405f-057e-416b-9540-bf0f0f0d2b7b\" (UID: \"d552405f-057e-416b-9540-bf0f0f0d2b7b\") " Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.845774 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-nova-metadata-tls-certs\") pod \"27b7d05d-503e-4417-abc0-4c9b58aec030\" (UID: \"27b7d05d-503e-4417-abc0-4c9b58aec030\") " Nov 26 18:06:26 crc 
kubenswrapper[5010]: I1126 18:06:26.850469 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/27b7d05d-503e-4417-abc0-4c9b58aec030-logs" (OuterVolumeSpecName: "logs") pod "27b7d05d-503e-4417-abc0-4c9b58aec030" (UID: "27b7d05d-503e-4417-abc0-4c9b58aec030"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.850531 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d552405f-057e-416b-9540-bf0f0f0d2b7b-logs" (OuterVolumeSpecName: "logs") pod "d552405f-057e-416b-9540-bf0f0f0d2b7b" (UID: "d552405f-057e-416b-9540-bf0f0f0d2b7b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.873969 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.881641 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27b7d05d-503e-4417-abc0-4c9b58aec030-kube-api-access-zvb8q" (OuterVolumeSpecName: "kube-api-access-zvb8q") pod "27b7d05d-503e-4417-abc0-4c9b58aec030" (UID: "27b7d05d-503e-4417-abc0-4c9b58aec030"). InnerVolumeSpecName "kube-api-access-zvb8q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.904236 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.904813 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d552405f-057e-416b-9540-bf0f0f0d2b7b-kube-api-access-qkdcm" (OuterVolumeSpecName: "kube-api-access-qkdcm") pod "d552405f-057e-416b-9540-bf0f0f0d2b7b" (UID: "d552405f-057e-416b-9540-bf0f0f0d2b7b"). InnerVolumeSpecName "kube-api-access-qkdcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.923920 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d552405f-057e-416b-9540-bf0f0f0d2b7b" (UID: "d552405f-057e-416b-9540-bf0f0f0d2b7b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.930923 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-config-data" (OuterVolumeSpecName: "config-data") pod "27b7d05d-503e-4417-abc0-4c9b58aec030" (UID: "27b7d05d-503e-4417-abc0-4c9b58aec030"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.943419 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "27b7d05d-503e-4417-abc0-4c9b58aec030" (UID: "27b7d05d-503e-4417-abc0-4c9b58aec030"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948608 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvb8q\" (UniqueName: \"kubernetes.io/projected/27b7d05d-503e-4417-abc0-4c9b58aec030-kube-api-access-zvb8q\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948636 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkdcm\" (UniqueName: \"kubernetes.io/projected/d552405f-057e-416b-9540-bf0f0f0d2b7b-kube-api-access-qkdcm\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948645 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/27b7d05d-503e-4417-abc0-4c9b58aec030-logs\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948654 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948662 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948670 5010 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d552405f-057e-416b-9540-bf0f0f0d2b7b-logs\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.948678 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.973142 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.979123 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 18:06:26 crc kubenswrapper[5010]: I1126 18:06:26.985679 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "d552405f-057e-416b-9540-bf0f0f0d2b7b" (UID: "d552405f-057e-416b-9540-bf0f0f0d2b7b"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.006471 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.007104 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-api" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.007178 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-api" Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.007257 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-log" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.007335 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-log" Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.007414 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-log" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.007478 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-log" Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.007540 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-metadata" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.007599 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-metadata" Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.007663 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" containerName="nova-scheduler-scheduler" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.007743 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" containerName="nova-scheduler-scheduler" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.007992 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-log" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.008082 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-log" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.008152 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" containerName="nova-scheduler-scheduler" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.008224 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-metadata" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.008289 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" containerName="nova-api-api" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.009465 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.012768 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.025039 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.026934 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-config-data" (OuterVolumeSpecName: "config-data") pod "d552405f-057e-416b-9540-bf0f0f0d2b7b" (UID: "d552405f-057e-416b-9540-bf0f0f0d2b7b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.037016 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "27b7d05d-503e-4417-abc0-4c9b58aec030" (UID: "27b7d05d-503e-4417-abc0-4c9b58aec030"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.037890 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d552405f-057e-416b-9540-bf0f0f0d2b7b" (UID: "d552405f-057e-416b-9540-bf0f0f0d2b7b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.050150 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4bgn\" (UniqueName: \"kubernetes.io/projected/f7b955c7-f81c-41c1-aba7-75dac6c8281d-kube-api-access-d4bgn\") pod \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.050487 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-combined-ca-bundle\") pod \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.050610 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-config-data\") pod \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\" (UID: \"f7b955c7-f81c-41c1-aba7-75dac6c8281d\") " Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.059056 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.059108 5010 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.059120 5010 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/d552405f-057e-416b-9540-bf0f0f0d2b7b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.059191 5010 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/27b7d05d-503e-4417-abc0-4c9b58aec030-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.064173 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7b955c7-f81c-41c1-aba7-75dac6c8281d-kube-api-access-d4bgn" (OuterVolumeSpecName: "kube-api-access-d4bgn") pod "f7b955c7-f81c-41c1-aba7-75dac6c8281d" (UID: "f7b955c7-f81c-41c1-aba7-75dac6c8281d"). InnerVolumeSpecName "kube-api-access-d4bgn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.094852 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f7b955c7-f81c-41c1-aba7-75dac6c8281d" (UID: "f7b955c7-f81c-41c1-aba7-75dac6c8281d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.123817 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47e9d252_9ba4_4d0d_9376_5e55278708b6.slice\": RecentStats: unable to find data in memory cache]" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.161076 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.161404 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6frss\" (UniqueName: \"kubernetes.io/projected/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-kube-api-access-6frss\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.161460 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.161510 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4bgn\" (UniqueName: \"kubernetes.io/projected/f7b955c7-f81c-41c1-aba7-75dac6c8281d-kube-api-access-d4bgn\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.161520 5010 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.176013 5010 generic.go:334] "Generic (PLEG): container finished" 
podID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" exitCode=0 Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.176101 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f7b955c7-f81c-41c1-aba7-75dac6c8281d","Type":"ContainerDied","Data":"86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d"} Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.176158 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"f7b955c7-f81c-41c1-aba7-75dac6c8281d","Type":"ContainerDied","Data":"a6db2911ebcf5a59aedeee43e823652448e72f9231624ffcfdf1c56a2bdbc7ea"} Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.176177 5010 scope.go:117] "RemoveContainer" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.176281 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.182412 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d552405f-057e-416b-9540-bf0f0f0d2b7b","Type":"ContainerDied","Data":"1b02597722483ce704776eb3a5d982f9ae0ca278a059d0b3d56b16f565cdd06f"} Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.182536 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.196409 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"27b7d05d-503e-4417-abc0-4c9b58aec030","Type":"ContainerDied","Data":"3107d9fc1f7afb1f7e7c57b3d21ad7a84968d16623b59dc1c8a99b589b230e27"} Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.196509 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.206020 5010 scope.go:117] "RemoveContainer" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" Nov 26 18:06:27 crc kubenswrapper[5010]: E1126 18:06:27.206423 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d\": container with ID starting with 86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d not found: ID does not exist" containerID="86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.206460 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d"} err="failed to get container status \"86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d\": rpc error: code = NotFound desc = could not find container \"86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d\": container with ID starting with 86bdd9da2472d2e6019a83e22fc40388b6664b3b78f8fae2d616baf622923b7d not found: ID does not exist" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.206486 5010 scope.go:117] "RemoveContainer" containerID="1294c1a84215055821e00cdaa63179c1e469c4adae2aadcbb6d8a0fbdbf15af3" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.206674 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-config-data" (OuterVolumeSpecName: "config-data") pod "f7b955c7-f81c-41c1-aba7-75dac6c8281d" (UID: "f7b955c7-f81c-41c1-aba7-75dac6c8281d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.250541 5010 scope.go:117] "RemoveContainer" containerID="6708c1e4535a7b8f472b85b0cbb474bc683659dd9f4915f1d64fa4e939504f05" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.253549 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.262779 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.262893 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6frss\" (UniqueName: \"kubernetes.io/projected/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-kube-api-access-6frss\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.262951 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.263063 5010 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f7b955c7-f81c-41c1-aba7-75dac6c8281d-config-data\") on node \"crc\" DevicePath \"\"" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.269406 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.269727 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.271487 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.289224 5010 scope.go:117] "RemoveContainer" containerID="3479b37319a03cc7eb1fcfe947294c65c254f47c77a5bd2d2b976a8aeb25b40f" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.293854 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6frss\" (UniqueName: \"kubernetes.io/projected/b7fdf798-30ad-49bc-9c7a-7684b52e34bf-kube-api-access-6frss\") pod \"nova-cell0-conductor-0\" (UID: \"b7fdf798-30ad-49bc-9c7a-7684b52e34bf\") " pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.293914 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.312359 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 18:06:27 crc 
kubenswrapper[5010]: I1126 18:06:27.319325 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.322263 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.324561 5010 scope.go:117] "RemoveContainer" containerID="0d5eb9baf703c29ae969ea38e5dab0f1636b77b82498c3e1917f7088fde488dd" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.325153 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.325199 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.325154 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.329124 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.333436 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.339637 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.342333 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.344255 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.344627 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.349057 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.471597 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.471957 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbe636bf-8dbb-47f5-9af6-50601035a730-logs\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.471994 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-config-data\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472120 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: 
\"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472168 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472192 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6zxs\" (UniqueName: \"kubernetes.io/projected/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-kube-api-access-l6zxs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472207 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472255 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-public-tls-certs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472281 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmmtm\" (UniqueName: \"kubernetes.io/projected/cbe636bf-8dbb-47f5-9af6-50601035a730-kube-api-access-bmmtm\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472301 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-config-data\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.472327 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-logs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.528396 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.554440 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.568325 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.569853 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.572380 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573765 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573813 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573838 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6zxs\" (UniqueName: \"kubernetes.io/projected/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-kube-api-access-l6zxs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573856 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573899 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-public-tls-certs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573923 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmmtm\" (UniqueName: \"kubernetes.io/projected/cbe636bf-8dbb-47f5-9af6-50601035a730-kube-api-access-bmmtm\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573941 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-config-data\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.573963 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-logs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.574014 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.574035 5010 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbe636bf-8dbb-47f5-9af6-50601035a730-logs\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.574059 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-config-data\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.575097 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cbe636bf-8dbb-47f5-9af6-50601035a730-logs\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.575370 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-logs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.577574 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.578931 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.581174 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.581410 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-public-tls-certs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.583112 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.583136 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbe636bf-8dbb-47f5-9af6-50601035a730-config-data\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.584938 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-config-data\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 
crc kubenswrapper[5010]: I1126 18:06:27.603864 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.606557 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmmtm\" (UniqueName: \"kubernetes.io/projected/cbe636bf-8dbb-47f5-9af6-50601035a730-kube-api-access-bmmtm\") pod \"nova-metadata-0\" (UID: \"cbe636bf-8dbb-47f5-9af6-50601035a730\") " pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.612336 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6zxs\" (UniqueName: \"kubernetes.io/projected/bc980ee9-e3a1-4293-9030-4bd470e8d0f9-kube-api-access-l6zxs\") pod \"nova-api-0\" (UID: \"bc980ee9-e3a1-4293-9030-4bd470e8d0f9\") " pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.617100 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.647918 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.670162 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.675617 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dq7qq\" (UniqueName: \"kubernetes.io/projected/e3f23011-c51f-4c71-b83a-fd35b10153e4-kube-api-access-dq7qq\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.675670 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f23011-c51f-4c71-b83a-fd35b10153e4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.675758 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3f23011-c51f-4c71-b83a-fd35b10153e4-config-data\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.778061 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dq7qq\" (UniqueName: \"kubernetes.io/projected/e3f23011-c51f-4c71-b83a-fd35b10153e4-kube-api-access-dq7qq\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.778327 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f23011-c51f-4c71-b83a-fd35b10153e4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: 
I1126 18:06:27.778385 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3f23011-c51f-4c71-b83a-fd35b10153e4-config-data\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.794733 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3f23011-c51f-4c71-b83a-fd35b10153e4-config-data\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.794812 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3f23011-c51f-4c71-b83a-fd35b10153e4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.794810 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dq7qq\" (UniqueName: \"kubernetes.io/projected/e3f23011-c51f-4c71-b83a-fd35b10153e4-kube-api-access-dq7qq\") pod \"nova-scheduler-0\" (UID: \"e3f23011-c51f-4c71-b83a-fd35b10153e4\") " pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.858306 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.878003 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.911894 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" path="/var/lib/kubelet/pods/27b7d05d-503e-4417-abc0-4c9b58aec030/volumes" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.915061 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47e9d252-9ba4-4d0d-9376-5e55278708b6" path="/var/lib/kubelet/pods/47e9d252-9ba4-4d0d-9376-5e55278708b6/volumes" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.915640 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d552405f-057e-416b-9540-bf0f0f0d2b7b" path="/var/lib/kubelet/pods/d552405f-057e-416b-9540-bf0f0f0d2b7b/volumes" Nov 26 18:06:27 crc kubenswrapper[5010]: I1126 18:06:27.918425 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7b955c7-f81c-41c1-aba7-75dac6c8281d" path="/var/lib/kubelet/pods/f7b955c7-f81c-41c1-aba7-75dac6c8281d/volumes" Nov 26 18:06:28 crc kubenswrapper[5010]: W1126 18:06:28.174621 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc980ee9_e3a1_4293_9030_4bd470e8d0f9.slice/crio-5b86e323af72b30cff9ddab978ad46f7a0524d457c0f5515cee78798bc1631a1 WatchSource:0}: Error finding container 5b86e323af72b30cff9ddab978ad46f7a0524d457c0f5515cee78798bc1631a1: Status 404 returned error can't find the container with id 5b86e323af72b30cff9ddab978ad46f7a0524d457c0f5515cee78798bc1631a1 Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.178556 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.208413 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" event={"ID":"ba894d64-c3e3-4595-a376-bfdc8429afca","Type":"ContainerStarted","Data":"96a9b4208efbd4d890bbf989accf3049502103e1d8b63530bb57d7f3a61323ca"} Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.210252 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bc980ee9-e3a1-4293-9030-4bd470e8d0f9","Type":"ContainerStarted","Data":"5b86e323af72b30cff9ddab978ad46f7a0524d457c0f5515cee78798bc1631a1"} Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.212819 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b7fdf798-30ad-49bc-9c7a-7684b52e34bf","Type":"ContainerStarted","Data":"36a4eed7df4e7a2d9dd8643bff177cd8fd8c3336afc784c5a79d5a0d54be0f87"} Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.212857 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"b7fdf798-30ad-49bc-9c7a-7684b52e34bf","Type":"ContainerStarted","Data":"1bd091f1e4fc42b51e39a066a25a1f93ae7ae52d62c154d56bdfb29550a39adb"} Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.212927 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.240547 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.24052109 podStartE2EDuration="2.24052109s" podCreationTimestamp="2025-11-26 18:06:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:06:28.234979242 +0000 UTC m=+9609.025696420" watchObservedRunningTime="2025-11-26 18:06:28.24052109 +0000 UTC m=+9609.031238268" Nov 26 18:06:28 crc kubenswrapper[5010]: I1126 18:06:28.288808 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.061754 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 26 18:06:29 crc kubenswrapper[5010]: W1126 18:06:29.072234 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3f23011_c51f_4c71_b83a_fd35b10153e4.slice/crio-1ae090983979082adca485b66048af304853fedead7d6fd314c1da7991ae1858 WatchSource:0}: Error finding container 1ae090983979082adca485b66048af304853fedead7d6fd314c1da7991ae1858: Status 404 returned error can't find the container with id 1ae090983979082adca485b66048af304853fedead7d6fd314c1da7991ae1858 Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.227506 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bc980ee9-e3a1-4293-9030-4bd470e8d0f9","Type":"ContainerStarted","Data":"e6501de4c60c26358cf643351f73bd97a49b205b796c22cc916a7edec6477bf5"} Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.227544 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bc980ee9-e3a1-4293-9030-4bd470e8d0f9","Type":"ContainerStarted","Data":"f95f71166b2e79d5c73ac31c794c8d26bb65eb6aec93a06eccf7dfcfdbcd0f5e"} Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.228694 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"e3f23011-c51f-4c71-b83a-fd35b10153e4","Type":"ContainerStarted","Data":"1ae090983979082adca485b66048af304853fedead7d6fd314c1da7991ae1858"} Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.229892 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"cbe636bf-8dbb-47f5-9af6-50601035a730","Type":"ContainerStarted","Data":"424b1b5980548434ee0d885be1b8f4f6b86caa864eaed07d6a4228f16c98e3c1"} Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.229984 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"cbe636bf-8dbb-47f5-9af6-50601035a730","Type":"ContainerStarted","Data":"5f77803001a48204852336444ac972dbd709af83e20ba969e49125a5d4b343d6"} Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.230036 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"cbe636bf-8dbb-47f5-9af6-50601035a730","Type":"ContainerStarted","Data":"f06395da9d479b97b5d44c4cb5cf834325ae3dd4fff00865d8ea88d947999a33"} Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.256960 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.256941785 podStartE2EDuration="2.256941785s" podCreationTimestamp="2025-11-26 18:06:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:06:29.253366976 +0000 UTC m=+9610.044084144" watchObservedRunningTime="2025-11-26 18:06:29.256941785 +0000 UTC m=+9610.047658933" Nov 26 18:06:29 crc kubenswrapper[5010]: I1126 18:06:29.274887 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.274866043 podStartE2EDuration="2.274866043s" podCreationTimestamp="2025-11-26 18:06:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:06:29.271822397 +0000 UTC m=+9610.062539545" watchObservedRunningTime="2025-11-26 18:06:29.274866043 +0000 UTC m=+9610.065583191" Nov 26 18:06:30 crc kubenswrapper[5010]: I1126 18:06:30.246584 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e3f23011-c51f-4c71-b83a-fd35b10153e4","Type":"ContainerStarted","Data":"8890566f8e1e8fe24a88b35c71e94219dfeb1b3ca1dc3a35e4f570c2cfacf4a5"} Nov 26 18:06:30 crc kubenswrapper[5010]: I1126 18:06:30.251255 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" event={"ID":"ba894d64-c3e3-4595-a376-bfdc8429afca","Type":"ContainerStarted","Data":"0ca7424acd15e63e7fb05034d4fb77ae0e08a60aa984acce6a961eb034f4d403"} Nov 26 18:06:30 crc kubenswrapper[5010]: I1126 18:06:30.278518 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.278499669 podStartE2EDuration="3.278499669s" podCreationTimestamp="2025-11-26 18:06:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:06:30.275776151 +0000 UTC m=+9611.066493329" watchObservedRunningTime="2025-11-26 18:06:30.278499669 +0000 UTC m=+9611.069216827" Nov 26 18:06:30 crc kubenswrapper[5010]: I1126 18:06:30.308444 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" podStartSLOduration=2.928235802 podStartE2EDuration="4.308427396s" podCreationTimestamp="2025-11-26 18:06:26 +0000 UTC" firstStartedPulling="2025-11-26 18:06:27.606311753 +0000 UTC m=+9608.397028901" lastFinishedPulling="2025-11-26 18:06:28.986503347 +0000 UTC m=+9609.777220495" observedRunningTime="2025-11-26 18:06:30.307851892 +0000 UTC m=+9611.098569060" watchObservedRunningTime="2025-11-26 18:06:30.308427396 +0000 UTC m=+9611.099144544" Nov 26 18:06:30 crc kubenswrapper[5010]: I1126 18:06:30.547872 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.109:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 18:06:30 crc kubenswrapper[5010]: I1126 18:06:30.547962 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="27b7d05d-503e-4417-abc0-4c9b58aec030" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.109:8775/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 26 18:06:32 crc kubenswrapper[5010]: I1126 18:06:32.671122 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 18:06:32 crc kubenswrapper[5010]: I1126 18:06:32.671486 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 26 18:06:32 crc kubenswrapper[5010]: I1126 18:06:32.858854 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 26 18:06:33 crc kubenswrapper[5010]: I1126 18:06:33.715202 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.375402 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.648717 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.649055 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.671004 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.672582 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.858951 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 26 18:06:37 crc kubenswrapper[5010]: I1126 18:06:37.904663 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 26 18:06:38 crc kubenswrapper[5010]: I1126 18:06:38.405167 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 26 18:06:38 crc kubenswrapper[5010]: I1126 18:06:38.663822 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" 
podUID="bc980ee9-e3a1-4293-9030-4bd470e8d0f9" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.1.219:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 18:06:38 crc kubenswrapper[5010]: I1126 18:06:38.663870 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bc980ee9-e3a1-4293-9030-4bd470e8d0f9" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.1.219:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 18:06:38 crc kubenswrapper[5010]: I1126 18:06:38.682796 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="cbe636bf-8dbb-47f5-9af6-50601035a730" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.1.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 18:06:38 crc kubenswrapper[5010]: I1126 18:06:38.682849 5010 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="cbe636bf-8dbb-47f5-9af6-50601035a730" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.1.220:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 26 18:06:41 crc kubenswrapper[5010]: I1126 18:06:41.422939 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:06:41 crc kubenswrapper[5010]: I1126 18:06:41.423462 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.657149 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.657866 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.658352 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.658411 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.666171 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.667216 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.676385 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.684380 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 26 18:06:47 crc kubenswrapper[5010]: I1126 18:06:47.692136 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/nova-metadata-0" Nov 26 18:06:48 crc kubenswrapper[5010]: I1126 18:06:48.507870 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.422866 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.423602 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.423688 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.425371 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b5250fe989f5010f7836a2449aac67959322b23a476a4ba6d58843cef5d7d82f"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.425514 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://b5250fe989f5010f7836a2449aac67959322b23a476a4ba6d58843cef5d7d82f" gracePeriod=600 Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.813925 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="b5250fe989f5010f7836a2449aac67959322b23a476a4ba6d58843cef5d7d82f" exitCode=0 Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.813996 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"b5250fe989f5010f7836a2449aac67959322b23a476a4ba6d58843cef5d7d82f"} Nov 26 18:07:11 crc kubenswrapper[5010]: I1126 18:07:11.814210 5010 scope.go:117] "RemoveContainer" containerID="6a01223e443cf3981ac1ce1070716003b44821c58028af68896408b0b7b6651b" Nov 26 18:07:12 crc kubenswrapper[5010]: I1126 18:07:12.831403 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e"} Nov 26 18:09:11 crc kubenswrapper[5010]: I1126 18:09:11.423306 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:09:11 crc kubenswrapper[5010]: I1126 18:09:11.424873 5010 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:09:41 crc kubenswrapper[5010]: I1126 18:09:41.423153 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:09:41 crc kubenswrapper[5010]: I1126 18:09:41.423991 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:10:11 crc kubenswrapper[5010]: I1126 18:10:11.422601 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:10:11 crc kubenswrapper[5010]: I1126 18:10:11.423290 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:10:11 crc kubenswrapper[5010]: I1126 18:10:11.423359 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 18:10:11 crc kubenswrapper[5010]: I1126 18:10:11.424652 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 18:10:11 crc kubenswrapper[5010]: I1126 18:10:11.424785 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" gracePeriod=600 Nov 26 18:10:11 crc kubenswrapper[5010]: E1126 18:10:11.557174 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:10:12 crc kubenswrapper[5010]: I1126 18:10:12.352323 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" 
containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" exitCode=0 Nov 26 18:10:12 crc kubenswrapper[5010]: I1126 18:10:12.352378 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e"} Nov 26 18:10:12 crc kubenswrapper[5010]: I1126 18:10:12.352932 5010 scope.go:117] "RemoveContainer" containerID="b5250fe989f5010f7836a2449aac67959322b23a476a4ba6d58843cef5d7d82f" Nov 26 18:10:12 crc kubenswrapper[5010]: I1126 18:10:12.353849 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:10:12 crc kubenswrapper[5010]: E1126 18:10:12.354221 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:10:26 crc kubenswrapper[5010]: I1126 18:10:26.892812 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:10:26 crc kubenswrapper[5010]: E1126 18:10:26.894352 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:10:41 crc kubenswrapper[5010]: I1126 18:10:41.892057 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:10:41 crc kubenswrapper[5010]: E1126 18:10:41.893283 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:10:52 crc kubenswrapper[5010]: I1126 18:10:52.893972 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:10:52 crc kubenswrapper[5010]: E1126 18:10:52.895059 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:11:06 crc kubenswrapper[5010]: I1126 18:11:06.893309 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:11:06 crc kubenswrapper[5010]: E1126 18:11:06.894695 5010 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:11:18 crc kubenswrapper[5010]: I1126 18:11:18.892247 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:11:18 crc kubenswrapper[5010]: E1126 18:11:18.893005 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:11:30 crc kubenswrapper[5010]: I1126 18:11:30.891957 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:11:30 crc kubenswrapper[5010]: E1126 18:11:30.892879 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:11:44 crc kubenswrapper[5010]: I1126 18:11:44.892975 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:11:44 crc kubenswrapper[5010]: E1126 18:11:44.893801 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:11:57 crc kubenswrapper[5010]: I1126 18:11:57.892238 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:11:57 crc kubenswrapper[5010]: E1126 18:11:57.893300 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:12:08 crc kubenswrapper[5010]: I1126 18:12:08.892422 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:12:08 crc kubenswrapper[5010]: E1126 18:12:08.893818 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:12:20 crc kubenswrapper[5010]: I1126 18:12:20.891817 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:12:20 crc kubenswrapper[5010]: E1126 18:12:20.892594 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:12:34 crc kubenswrapper[5010]: I1126 18:12:34.254774 5010 generic.go:334] "Generic (PLEG): container finished" podID="ba894d64-c3e3-4595-a376-bfdc8429afca" containerID="0ca7424acd15e63e7fb05034d4fb77ae0e08a60aa984acce6a961eb034f4d403" exitCode=0 Nov 26 18:12:34 crc kubenswrapper[5010]: I1126 18:12:34.254840 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" event={"ID":"ba894d64-c3e3-4595-a376-bfdc8429afca","Type":"ContainerDied","Data":"0ca7424acd15e63e7fb05034d4fb77ae0e08a60aa984acce6a961eb034f4d403"} Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.726928 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.810626 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-0\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.810900 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-ssh-key\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.810960 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-inventory\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.811007 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-0\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.811172 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-1\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 
26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.811637 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-combined-ca-bundle\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.811696 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cells-global-config-0\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.811728 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-1\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.811752 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6v878\" (UniqueName: \"kubernetes.io/projected/ba894d64-c3e3-4595-a376-bfdc8429afca-kube-api-access-6v878\") pod \"ba894d64-c3e3-4595-a376-bfdc8429afca\" (UID: \"ba894d64-c3e3-4595-a376-bfdc8429afca\") " Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.836441 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-combined-ca-bundle" (OuterVolumeSpecName: "nova-cell1-combined-ca-bundle") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "nova-cell1-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.836528 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba894d64-c3e3-4595-a376-bfdc8429afca-kube-api-access-6v878" (OuterVolumeSpecName: "kube-api-access-6v878") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "kube-api-access-6v878". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.848803 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-inventory" (OuterVolumeSpecName: "inventory") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.850107 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.857242 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.858276 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cells-global-config-0" (OuterVolumeSpecName: "nova-cells-global-config-0") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "nova-cells-global-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.860722 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.863266 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.870134 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ba894d64-c3e3-4595-a376-bfdc8429afca" (UID: "ba894d64-c3e3-4595-a376-bfdc8429afca"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.893199 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:12:35 crc kubenswrapper[5010]: E1126 18:12:35.893678 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914587 5010 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914613 5010 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914622 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914633 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cells-global-config-0\" (UniqueName: \"kubernetes.io/configmap/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cells-global-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914642 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914652 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6v878\" (UniqueName: \"kubernetes.io/projected/ba894d64-c3e3-4595-a376-bfdc8429afca-kube-api-access-6v878\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914660 5010 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914670 5010 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:35 crc kubenswrapper[5010]: I1126 18:12:35.914678 5010 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ba894d64-c3e3-4595-a376-bfdc8429afca-inventory\") on node \"crc\" DevicePath \"\"" Nov 26 18:12:36 crc kubenswrapper[5010]: I1126 18:12:36.286885 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" 
event={"ID":"ba894d64-c3e3-4595-a376-bfdc8429afca","Type":"ContainerDied","Data":"96a9b4208efbd4d890bbf989accf3049502103e1d8b63530bb57d7f3a61323ca"} Nov 26 18:12:36 crc kubenswrapper[5010]: I1126 18:12:36.286943 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="96a9b4208efbd4d890bbf989accf3049502103e1d8b63530bb57d7f3a61323ca" Nov 26 18:12:36 crc kubenswrapper[5010]: I1126 18:12:36.287326 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb" Nov 26 18:12:50 crc kubenswrapper[5010]: I1126 18:12:50.891926 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:12:50 crc kubenswrapper[5010]: E1126 18:12:50.892851 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:12:57 crc kubenswrapper[5010]: I1126 18:12:57.637138 5010 trace.go:236] Trace[722571121]: "Calculate volume metrics of prometheus-metric-storage-db for pod openstack/prometheus-metric-storage-0" (26-Nov-2025 18:12:56.625) (total time: 1012ms): Nov 26 18:12:57 crc kubenswrapper[5010]: Trace[722571121]: [1.012035916s] [1.012035916s] END Nov 26 18:13:03 crc kubenswrapper[5010]: I1126 18:13:03.895152 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:13:03 crc kubenswrapper[5010]: E1126 18:13:03.897588 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:13:16 crc kubenswrapper[5010]: I1126 18:13:16.891784 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:13:16 crc kubenswrapper[5010]: E1126 18:13:16.892751 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:13:27 crc kubenswrapper[5010]: I1126 18:13:27.892029 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:13:27 crc kubenswrapper[5010]: E1126 18:13:27.892876 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:13:42 crc kubenswrapper[5010]: I1126 18:13:42.893115 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:13:42 crc kubenswrapper[5010]: E1126 18:13:42.894058 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:13:53 crc kubenswrapper[5010]: I1126 18:13:53.892364 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:13:53 crc kubenswrapper[5010]: E1126 18:13:53.893222 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:14:06 crc kubenswrapper[5010]: I1126 18:14:06.891620 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:14:06 crc kubenswrapper[5010]: E1126 18:14:06.892335 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.029461 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-qjc8f"] Nov 26 18:14:09 crc kubenswrapper[5010]: E1126 18:14:09.030193 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba894d64-c3e3-4595-a376-bfdc8429afca" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.030208 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba894d64-c3e3-4595-a376-bfdc8429afca" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.030455 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba894d64-c3e3-4595-a376-bfdc8429afca" containerName="nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.032573 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.057354 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qjc8f"] Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.133521 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-utilities\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.133790 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tznzx\" (UniqueName: \"kubernetes.io/projected/173fa938-5908-4b7b-8146-0957272ec6bf-kube-api-access-tznzx\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.133921 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-catalog-content\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.236296 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-utilities\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.236413 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tznzx\" (UniqueName: \"kubernetes.io/projected/173fa938-5908-4b7b-8146-0957272ec6bf-kube-api-access-tznzx\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.236451 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-catalog-content\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.236884 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-catalog-content\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.236965 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-utilities\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.259503 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tznzx\" (UniqueName: \"kubernetes.io/projected/173fa938-5908-4b7b-8146-0957272ec6bf-kube-api-access-tznzx\") pod \"community-operators-qjc8f\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.352979 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:09 crc kubenswrapper[5010]: I1126 18:14:09.923038 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-qjc8f"] Nov 26 18:14:10 crc kubenswrapper[5010]: I1126 18:14:10.120639 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerStarted","Data":"7758b5a24f952ef10592bf9a5131556af4e7edb778aed2bb8ca901a758b5fc2f"} Nov 26 18:14:11 crc kubenswrapper[5010]: I1126 18:14:11.135515 5010 generic.go:334] "Generic (PLEG): container finished" podID="173fa938-5908-4b7b-8146-0957272ec6bf" containerID="569eae260444e6316a747dc345df54d378b71424e800d4b2830ddc288dca6ba1" exitCode=0 Nov 26 18:14:11 crc kubenswrapper[5010]: I1126 18:14:11.135571 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerDied","Data":"569eae260444e6316a747dc345df54d378b71424e800d4b2830ddc288dca6ba1"} Nov 26 18:14:11 crc kubenswrapper[5010]: I1126 18:14:11.138300 5010 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 18:14:13 crc kubenswrapper[5010]: I1126 18:14:13.164530 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerStarted","Data":"bd41edbdea298d31e912c66333baa31406d8e7c84a84aa2957f8f14ebfcd877e"} Nov 26 18:14:14 crc kubenswrapper[5010]: I1126 18:14:14.199063 5010 generic.go:334] "Generic (PLEG): container finished" podID="173fa938-5908-4b7b-8146-0957272ec6bf" containerID="bd41edbdea298d31e912c66333baa31406d8e7c84a84aa2957f8f14ebfcd877e" exitCode=0 Nov 26 18:14:14 crc kubenswrapper[5010]: I1126 18:14:14.199126 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerDied","Data":"bd41edbdea298d31e912c66333baa31406d8e7c84a84aa2957f8f14ebfcd877e"} Nov 26 18:14:15 crc kubenswrapper[5010]: I1126 18:14:15.211541 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerStarted","Data":"2ab251ed9b811085915ca27fa93f6435e18ea1e3f016b4f85a6a8b94ab83c357"} Nov 26 18:14:15 crc kubenswrapper[5010]: I1126 18:14:15.244116 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-qjc8f" podStartSLOduration=2.729393141 podStartE2EDuration="6.244090773s" podCreationTimestamp="2025-11-26 18:14:09 +0000 UTC" firstStartedPulling="2025-11-26 18:14:11.138070714 +0000 UTC m=+10071.928787862" lastFinishedPulling="2025-11-26 18:14:14.652768336 +0000 UTC m=+10075.443485494" observedRunningTime="2025-11-26 18:14:15.229607742 +0000 UTC m=+10076.020324930" watchObservedRunningTime="2025-11-26 
18:14:15.244090773 +0000 UTC m=+10076.034807941" Nov 26 18:14:19 crc kubenswrapper[5010]: I1126 18:14:19.353431 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:19 crc kubenswrapper[5010]: I1126 18:14:19.354016 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:19 crc kubenswrapper[5010]: I1126 18:14:19.403042 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:20 crc kubenswrapper[5010]: I1126 18:14:20.843061 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:20 crc kubenswrapper[5010]: I1126 18:14:20.891340 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:14:20 crc kubenswrapper[5010]: E1126 18:14:20.891806 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:14:20 crc kubenswrapper[5010]: I1126 18:14:20.923162 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qjc8f"] Nov 26 18:14:22 crc kubenswrapper[5010]: I1126 18:14:22.292681 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-qjc8f" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="registry-server" containerID="cri-o://2ab251ed9b811085915ca27fa93f6435e18ea1e3f016b4f85a6a8b94ab83c357" gracePeriod=2 Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.304474 5010 generic.go:334] "Generic (PLEG): container finished" podID="173fa938-5908-4b7b-8146-0957272ec6bf" containerID="2ab251ed9b811085915ca27fa93f6435e18ea1e3f016b4f85a6a8b94ab83c357" exitCode=0 Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.304884 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerDied","Data":"2ab251ed9b811085915ca27fa93f6435e18ea1e3f016b4f85a6a8b94ab83c357"} Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.304916 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-qjc8f" event={"ID":"173fa938-5908-4b7b-8146-0957272ec6bf","Type":"ContainerDied","Data":"7758b5a24f952ef10592bf9a5131556af4e7edb778aed2bb8ca901a758b5fc2f"} Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.304931 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7758b5a24f952ef10592bf9a5131556af4e7edb778aed2bb8ca901a758b5fc2f" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.397951 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.540577 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-utilities\") pod \"173fa938-5908-4b7b-8146-0957272ec6bf\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.540898 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-catalog-content\") pod \"173fa938-5908-4b7b-8146-0957272ec6bf\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.541011 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tznzx\" (UniqueName: \"kubernetes.io/projected/173fa938-5908-4b7b-8146-0957272ec6bf-kube-api-access-tznzx\") pod \"173fa938-5908-4b7b-8146-0957272ec6bf\" (UID: \"173fa938-5908-4b7b-8146-0957272ec6bf\") " Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.541973 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-utilities" (OuterVolumeSpecName: "utilities") pod "173fa938-5908-4b7b-8146-0957272ec6bf" (UID: "173fa938-5908-4b7b-8146-0957272ec6bf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.549676 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/173fa938-5908-4b7b-8146-0957272ec6bf-kube-api-access-tznzx" (OuterVolumeSpecName: "kube-api-access-tznzx") pod "173fa938-5908-4b7b-8146-0957272ec6bf" (UID: "173fa938-5908-4b7b-8146-0957272ec6bf"). InnerVolumeSpecName "kube-api-access-tznzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.610132 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "173fa938-5908-4b7b-8146-0957272ec6bf" (UID: "173fa938-5908-4b7b-8146-0957272ec6bf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.646168 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.646229 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tznzx\" (UniqueName: \"kubernetes.io/projected/173fa938-5908-4b7b-8146-0957272ec6bf-kube-api-access-tznzx\") on node \"crc\" DevicePath \"\"" Nov 26 18:14:23 crc kubenswrapper[5010]: I1126 18:14:23.646254 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/173fa938-5908-4b7b-8146-0957272ec6bf-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:14:24 crc kubenswrapper[5010]: I1126 18:14:24.318050 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-qjc8f" Nov 26 18:14:24 crc kubenswrapper[5010]: I1126 18:14:24.362198 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-qjc8f"] Nov 26 18:14:24 crc kubenswrapper[5010]: I1126 18:14:24.376080 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-qjc8f"] Nov 26 18:14:25 crc kubenswrapper[5010]: I1126 18:14:25.905571 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" path="/var/lib/kubelet/pods/173fa938-5908-4b7b-8146-0957272ec6bf/volumes" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.156759 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bzwsh"] Nov 26 18:14:29 crc kubenswrapper[5010]: E1126 18:14:29.160799 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="extract-content" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.160934 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="extract-content" Nov 26 18:14:29 crc kubenswrapper[5010]: E1126 18:14:29.161062 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="extract-utilities" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.161156 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="extract-utilities" Nov 26 18:14:29 crc kubenswrapper[5010]: E1126 18:14:29.161253 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="registry-server" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.161334 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="registry-server" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.161855 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="173fa938-5908-4b7b-8146-0957272ec6bf" containerName="registry-server" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.165315 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.183252 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzwsh"] Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.204889 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-catalog-content\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.204943 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7nx5j\" (UniqueName: \"kubernetes.io/projected/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-kube-api-access-7nx5j\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.205072 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-utilities\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.306885 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-catalog-content\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.306964 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7nx5j\" (UniqueName: \"kubernetes.io/projected/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-kube-api-access-7nx5j\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.307161 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-utilities\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.308235 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-catalog-content\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.309268 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-utilities\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.347349 5010 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7nx5j\" (UniqueName: \"kubernetes.io/projected/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-kube-api-access-7nx5j\") pod \"redhat-marketplace-bzwsh\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:29 crc kubenswrapper[5010]: I1126 18:14:29.501465 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:30 crc kubenswrapper[5010]: I1126 18:14:30.015235 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzwsh"] Nov 26 18:14:30 crc kubenswrapper[5010]: I1126 18:14:30.407472 5010 generic.go:334] "Generic (PLEG): container finished" podID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerID="f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48" exitCode=0 Nov 26 18:14:30 crc kubenswrapper[5010]: I1126 18:14:30.407555 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzwsh" event={"ID":"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640","Type":"ContainerDied","Data":"f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48"} Nov 26 18:14:30 crc kubenswrapper[5010]: I1126 18:14:30.407859 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzwsh" event={"ID":"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640","Type":"ContainerStarted","Data":"92a5c681df0276bd404b84b1fed787548610af8d8d561531042d854b01783e1f"} Nov 26 18:14:32 crc kubenswrapper[5010]: I1126 18:14:32.429336 5010 generic.go:334] "Generic (PLEG): container finished" podID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerID="de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242" exitCode=0 Nov 26 18:14:32 crc kubenswrapper[5010]: I1126 18:14:32.429399 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzwsh" event={"ID":"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640","Type":"ContainerDied","Data":"de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242"} Nov 26 18:14:33 crc kubenswrapper[5010]: I1126 18:14:33.891444 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:14:33 crc kubenswrapper[5010]: E1126 18:14:33.892018 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:14:34 crc kubenswrapper[5010]: I1126 18:14:34.453450 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzwsh" event={"ID":"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640","Type":"ContainerStarted","Data":"30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce"} Nov 26 18:14:34 crc kubenswrapper[5010]: I1126 18:14:34.497437 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bzwsh" podStartSLOduration=2.313713655 podStartE2EDuration="5.497404856s" podCreationTimestamp="2025-11-26 18:14:29 +0000 UTC" firstStartedPulling="2025-11-26 18:14:30.409891339 +0000 UTC m=+10091.200608517" lastFinishedPulling="2025-11-26 
18:14:33.59358256 +0000 UTC m=+10094.384299718" observedRunningTime="2025-11-26 18:14:34.486620277 +0000 UTC m=+10095.277337465" watchObservedRunningTime="2025-11-26 18:14:34.497404856 +0000 UTC m=+10095.288122054" Nov 26 18:14:37 crc kubenswrapper[5010]: I1126 18:14:37.055603 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 18:14:37 crc kubenswrapper[5010]: I1126 18:14:37.056372 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/mariadb-copy-data" podUID="bada47cf-95f0-498b-b0e7-0955fb512714" containerName="adoption" containerID="cri-o://91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368" gracePeriod=30 Nov 26 18:14:39 crc kubenswrapper[5010]: I1126 18:14:39.501663 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:39 crc kubenswrapper[5010]: I1126 18:14:39.502069 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:39 crc kubenswrapper[5010]: I1126 18:14:39.593893 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:40 crc kubenswrapper[5010]: I1126 18:14:40.601768 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:40 crc kubenswrapper[5010]: I1126 18:14:40.670140 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzwsh"] Nov 26 18:14:42 crc kubenswrapper[5010]: I1126 18:14:42.546691 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bzwsh" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="registry-server" containerID="cri-o://30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce" gracePeriod=2 Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.030245 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.155215 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7nx5j\" (UniqueName: \"kubernetes.io/projected/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-kube-api-access-7nx5j\") pod \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.155496 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-catalog-content\") pod \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.155671 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-utilities\") pod \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\" (UID: \"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640\") " Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.156553 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-utilities" (OuterVolumeSpecName: "utilities") pod "ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" (UID: "ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.161038 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-kube-api-access-7nx5j" (OuterVolumeSpecName: "kube-api-access-7nx5j") pod "ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" (UID: "ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640"). InnerVolumeSpecName "kube-api-access-7nx5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.192196 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" (UID: "ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.258605 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.258818 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.258909 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7nx5j\" (UniqueName: \"kubernetes.io/projected/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640-kube-api-access-7nx5j\") on node \"crc\" DevicePath \"\"" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.568070 5010 generic.go:334] "Generic (PLEG): container finished" podID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerID="30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce" exitCode=0 Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.568148 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzwsh" event={"ID":"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640","Type":"ContainerDied","Data":"30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce"} Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.568198 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bzwsh" event={"ID":"ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640","Type":"ContainerDied","Data":"92a5c681df0276bd404b84b1fed787548610af8d8d561531042d854b01783e1f"} Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.568238 5010 scope.go:117] "RemoveContainer" containerID="30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.568487 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bzwsh" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.604543 5010 scope.go:117] "RemoveContainer" containerID="de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.618589 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzwsh"] Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.632485 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bzwsh"] Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.637579 5010 scope.go:117] "RemoveContainer" containerID="f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.716210 5010 scope.go:117] "RemoveContainer" containerID="30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce" Nov 26 18:14:43 crc kubenswrapper[5010]: E1126 18:14:43.716657 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce\": container with ID starting with 30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce not found: ID does not exist" containerID="30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.716687 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce"} err="failed to get container status \"30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce\": rpc error: code = NotFound desc = could not find container \"30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce\": container with ID starting with 30699596ebfe34d1adabfb835683c6f5e44748492c9d7a64f259a8f3ea7954ce not found: ID does not exist" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.716724 5010 scope.go:117] "RemoveContainer" containerID="de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242" Nov 26 18:14:43 crc kubenswrapper[5010]: E1126 18:14:43.717157 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242\": container with ID starting with de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242 not found: ID does not exist" containerID="de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.717186 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242"} err="failed to get container status \"de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242\": rpc error: code = NotFound desc = could not find container \"de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242\": container with ID starting with de4b4de3fe542a21092598669b82e760c63617618d570a34d8b7f5da5321d242 not found: ID does not exist" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.717201 5010 scope.go:117] "RemoveContainer" containerID="f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48" Nov 26 18:14:43 crc kubenswrapper[5010]: E1126 18:14:43.717456 5010 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48\": container with ID starting with f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48 not found: ID does not exist" containerID="f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.717485 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48"} err="failed to get container status \"f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48\": rpc error: code = NotFound desc = could not find container \"f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48\": container with ID starting with f209379c1883467854faf91ffbf56a18cf56aa302185b636321584ede0612e48 not found: ID does not exist" Nov 26 18:14:43 crc kubenswrapper[5010]: I1126 18:14:43.907771 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" path="/var/lib/kubelet/pods/ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640/volumes" Nov 26 18:14:47 crc kubenswrapper[5010]: I1126 18:14:47.893501 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:14:47 crc kubenswrapper[5010]: E1126 18:14:47.894841 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:14:58 crc kubenswrapper[5010]: I1126 18:14:58.893208 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:14:58 crc kubenswrapper[5010]: E1126 18:14:58.901109 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.195201 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm"] Nov 26 18:15:00 crc kubenswrapper[5010]: E1126 18:15:00.196520 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="registry-server" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.196562 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="registry-server" Nov 26 18:15:00 crc kubenswrapper[5010]: E1126 18:15:00.196589 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="extract-utilities" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.196602 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="extract-utilities" Nov 26 18:15:00 crc 
kubenswrapper[5010]: E1126 18:15:00.196656 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="extract-content" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.196669 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="extract-content" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.197275 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecc7f0a4-cfeb-4ce9-9f87-8c2d71635640" containerName="registry-server" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.198242 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.203415 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.203450 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.214789 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm"] Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.307053 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-config-volume\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.307260 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zj9ng\" (UniqueName: \"kubernetes.io/projected/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-kube-api-access-zj9ng\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.307416 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-secret-volume\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.409726 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-secret-volume\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.409830 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-config-volume\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.409936 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zj9ng\" (UniqueName: \"kubernetes.io/projected/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-kube-api-access-zj9ng\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.411029 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-config-volume\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.418689 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-secret-volume\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.432286 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zj9ng\" (UniqueName: \"kubernetes.io/projected/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-kube-api-access-zj9ng\") pod \"collect-profiles-29403015-m5pfm\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:00 crc kubenswrapper[5010]: I1126 18:15:00.535195 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:01 crc kubenswrapper[5010]: I1126 18:15:01.041226 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm"] Nov 26 18:15:01 crc kubenswrapper[5010]: I1126 18:15:01.836968 5010 generic.go:334] "Generic (PLEG): container finished" podID="fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" containerID="722939995dfa3edd16386b81c5562899041547698e505abb715077dabbb6db1d" exitCode=0 Nov 26 18:15:01 crc kubenswrapper[5010]: I1126 18:15:01.837050 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" event={"ID":"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1","Type":"ContainerDied","Data":"722939995dfa3edd16386b81c5562899041547698e505abb715077dabbb6db1d"} Nov 26 18:15:01 crc kubenswrapper[5010]: I1126 18:15:01.838243 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" event={"ID":"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1","Type":"ContainerStarted","Data":"5618d379827c3a7c52098e1808948bc5199884887e628c22fc9bcc61472086cc"} Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.274416 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.399651 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zj9ng\" (UniqueName: \"kubernetes.io/projected/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-kube-api-access-zj9ng\") pod \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.399735 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-config-volume\") pod \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.399800 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-secret-volume\") pod \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\" (UID: \"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1\") " Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.400748 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-config-volume" (OuterVolumeSpecName: "config-volume") pod "fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" (UID: "fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.406050 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" (UID: "fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.406666 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-kube-api-access-zj9ng" (OuterVolumeSpecName: "kube-api-access-zj9ng") pod "fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" (UID: "fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1"). InnerVolumeSpecName "kube-api-access-zj9ng". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.503288 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zj9ng\" (UniqueName: \"kubernetes.io/projected/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-kube-api-access-zj9ng\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.503322 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.503337 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.868007 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" event={"ID":"fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1","Type":"ContainerDied","Data":"5618d379827c3a7c52098e1808948bc5199884887e628c22fc9bcc61472086cc"} Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.868077 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5618d379827c3a7c52098e1808948bc5199884887e628c22fc9bcc61472086cc" Nov 26 18:15:03 crc kubenswrapper[5010]: I1126 18:15:03.868092 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403015-m5pfm" Nov 26 18:15:04 crc kubenswrapper[5010]: I1126 18:15:04.390499 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9"] Nov 26 18:15:04 crc kubenswrapper[5010]: I1126 18:15:04.406638 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402970-p49p9"] Nov 26 18:15:05 crc kubenswrapper[5010]: I1126 18:15:05.923061 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49347c8e-e9fc-4a14-ba7a-19f5d2401d43" path="/var/lib/kubelet/pods/49347c8e-e9fc-4a14-ba7a-19f5d2401d43/volumes" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.654083 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.812583 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mariadb-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") pod \"bada47cf-95f0-498b-b0e7-0955fb512714\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.812939 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qc5fw\" (UniqueName: \"kubernetes.io/projected/bada47cf-95f0-498b-b0e7-0955fb512714-kube-api-access-qc5fw\") pod \"bada47cf-95f0-498b-b0e7-0955fb512714\" (UID: \"bada47cf-95f0-498b-b0e7-0955fb512714\") " Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.830645 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bada47cf-95f0-498b-b0e7-0955fb512714-kube-api-access-qc5fw" (OuterVolumeSpecName: "kube-api-access-qc5fw") pod "bada47cf-95f0-498b-b0e7-0955fb512714" (UID: "bada47cf-95f0-498b-b0e7-0955fb512714"). InnerVolumeSpecName "kube-api-access-qc5fw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.848213 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98" (OuterVolumeSpecName: "mariadb-data") pod "bada47cf-95f0-498b-b0e7-0955fb512714" (UID: "bada47cf-95f0-498b-b0e7-0955fb512714"). InnerVolumeSpecName "pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.916097 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qc5fw\" (UniqueName: \"kubernetes.io/projected/bada47cf-95f0-498b-b0e7-0955fb512714-kube-api-access-qc5fw\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.916610 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") on node \"crc\" " Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.929929 5010 generic.go:334] "Generic (PLEG): container finished" podID="bada47cf-95f0-498b-b0e7-0955fb512714" containerID="91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368" exitCode=137 Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.929987 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"bada47cf-95f0-498b-b0e7-0955fb512714","Type":"ContainerDied","Data":"91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368"} Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.930025 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/mariadb-copy-data" event={"ID":"bada47cf-95f0-498b-b0e7-0955fb512714","Type":"ContainerDied","Data":"62faf63544df5ee062f0a15c11488cc2116a33db64f7924f49c8ad8532bea6ac"} Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.930053 5010 scope.go:117] "RemoveContainer" containerID="91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.930234 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/mariadb-copy-data" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.958433 5010 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.958940 5010 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98") on node "crc" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.973422 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.973996 5010 scope.go:117] "RemoveContainer" containerID="91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368" Nov 26 18:15:07 crc kubenswrapper[5010]: E1126 18:15:07.974461 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368\": container with ID starting with 91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368 not found: ID does not exist" containerID="91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.974495 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368"} err="failed to get container status \"91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368\": rpc error: code = NotFound desc = could not find container \"91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368\": container with ID starting with 91be96006f7a9577ef14cd40a2f03b8385873d987820ab028463b89b664e7368 not found: ID does not exist" Nov 26 18:15:07 crc kubenswrapper[5010]: I1126 18:15:07.984132 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/mariadb-copy-data"] Nov 26 18:15:08 crc kubenswrapper[5010]: I1126 18:15:08.019992 5010 reconciler_common.go:293] "Volume detached for volume \"pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-a8ffbce9-82f1-4724-bec9-a96ff66d7e98\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:08 crc kubenswrapper[5010]: I1126 18:15:08.771382 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 18:15:08 crc kubenswrapper[5010]: I1126 18:15:08.772156 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-copy-data" podUID="d0a3cda8-d08e-45d1-865c-208d947680ce" containerName="adoption" containerID="cri-o://efe3d255e19a61263284d468fe31ad92be30aa318ef10c050ad6053201e2593d" gracePeriod=30 Nov 26 18:15:09 crc kubenswrapper[5010]: I1126 18:15:09.911841 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bada47cf-95f0-498b-b0e7-0955fb512714" path="/var/lib/kubelet/pods/bada47cf-95f0-498b-b0e7-0955fb512714/volumes" Nov 26 18:15:11 crc kubenswrapper[5010]: I1126 18:15:11.892234 5010 scope.go:117] "RemoveContainer" containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:15:13 crc kubenswrapper[5010]: I1126 18:15:13.004006 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" 
event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"67fa82ceefb0a9927b9de7df801e74f94dbff907b23d1a7863fb504d3662687f"} Nov 26 18:15:14 crc kubenswrapper[5010]: I1126 18:15:14.114926 5010 scope.go:117] "RemoveContainer" containerID="483128961b2824cebffbca5da39bdba86d047947402217024418b48cbc18856c" Nov 26 18:15:39 crc kubenswrapper[5010]: I1126 18:15:39.318350 5010 generic.go:334] "Generic (PLEG): container finished" podID="d0a3cda8-d08e-45d1-865c-208d947680ce" containerID="efe3d255e19a61263284d468fe31ad92be30aa318ef10c050ad6053201e2593d" exitCode=137 Nov 26 18:15:39 crc kubenswrapper[5010]: I1126 18:15:39.318438 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d0a3cda8-d08e-45d1-865c-208d947680ce","Type":"ContainerDied","Data":"efe3d255e19a61263284d468fe31ad92be30aa318ef10c050ad6053201e2593d"} Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.023619 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.119203 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fkxpr\" (UniqueName: \"kubernetes.io/projected/d0a3cda8-d08e-45d1-865c-208d947680ce-kube-api-access-fkxpr\") pod \"d0a3cda8-d08e-45d1-865c-208d947680ce\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.119511 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d0a3cda8-d08e-45d1-865c-208d947680ce-ovn-data-cert\") pod \"d0a3cda8-d08e-45d1-865c-208d947680ce\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.120303 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-data\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") pod \"d0a3cda8-d08e-45d1-865c-208d947680ce\" (UID: \"d0a3cda8-d08e-45d1-865c-208d947680ce\") " Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.126675 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0a3cda8-d08e-45d1-865c-208d947680ce-kube-api-access-fkxpr" (OuterVolumeSpecName: "kube-api-access-fkxpr") pod "d0a3cda8-d08e-45d1-865c-208d947680ce" (UID: "d0a3cda8-d08e-45d1-865c-208d947680ce"). InnerVolumeSpecName "kube-api-access-fkxpr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.128530 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0a3cda8-d08e-45d1-865c-208d947680ce-ovn-data-cert" (OuterVolumeSpecName: "ovn-data-cert") pod "d0a3cda8-d08e-45d1-865c-208d947680ce" (UID: "d0a3cda8-d08e-45d1-865c-208d947680ce"). InnerVolumeSpecName "ovn-data-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.152690 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a" (OuterVolumeSpecName: "ovn-data") pod "d0a3cda8-d08e-45d1-865c-208d947680ce" (UID: "d0a3cda8-d08e-45d1-865c-208d947680ce"). InnerVolumeSpecName "pvc-d120f006-30d3-481f-a395-7f76aa95832a". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.224076 5010 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"pvc-d120f006-30d3-481f-a395-7f76aa95832a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") on node \"crc\" " Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.224127 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fkxpr\" (UniqueName: \"kubernetes.io/projected/d0a3cda8-d08e-45d1-865c-208d947680ce-kube-api-access-fkxpr\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.224149 5010 reconciler_common.go:293] "Volume detached for volume \"ovn-data-cert\" (UniqueName: \"kubernetes.io/secret/d0a3cda8-d08e-45d1-865c-208d947680ce-ovn-data-cert\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.278822 5010 csi_attacher.go:630] kubernetes.io/csi: attacher.UnmountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping UnmountDevice... Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.279277 5010 operation_generator.go:917] UnmountDevice succeeded for volume "pvc-d120f006-30d3-481f-a395-7f76aa95832a" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a") on node "crc" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.326247 5010 reconciler_common.go:293] "Volume detached for volume \"pvc-d120f006-30d3-481f-a395-7f76aa95832a\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-d120f006-30d3-481f-a395-7f76aa95832a\") on node \"crc\" DevicePath \"\"" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.332206 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-copy-data" event={"ID":"d0a3cda8-d08e-45d1-865c-208d947680ce","Type":"ContainerDied","Data":"e109c570543ee92c7d62b0fb40661ec30e33c90bc660bf3c735c549b4e8c2d55"} Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.332315 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-copy-data" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.332373 5010 scope.go:117] "RemoveContainer" containerID="efe3d255e19a61263284d468fe31ad92be30aa318ef10c050ad6053201e2593d" Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.406065 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 18:15:40 crc kubenswrapper[5010]: I1126 18:15:40.423790 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-copy-data"] Nov 26 18:15:41 crc kubenswrapper[5010]: I1126 18:15:41.919109 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0a3cda8-d08e-45d1-865c-208d947680ce" path="/var/lib/kubelet/pods/d0a3cda8-d08e-45d1-865c-208d947680ce/volumes" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.689931 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-vz65b"] Nov 26 18:15:42 crc kubenswrapper[5010]: E1126 18:15:42.691152 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0a3cda8-d08e-45d1-865c-208d947680ce" containerName="adoption" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.691183 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0a3cda8-d08e-45d1-865c-208d947680ce" containerName="adoption" Nov 26 18:15:42 crc kubenswrapper[5010]: E1126 18:15:42.691212 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" containerName="collect-profiles" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.691223 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" containerName="collect-profiles" Nov 26 18:15:42 crc kubenswrapper[5010]: E1126 18:15:42.691261 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bada47cf-95f0-498b-b0e7-0955fb512714" containerName="adoption" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.691272 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="bada47cf-95f0-498b-b0e7-0955fb512714" containerName="adoption" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.691583 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="bada47cf-95f0-498b-b0e7-0955fb512714" containerName="adoption" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.691626 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0a3cda8-d08e-45d1-865c-208d947680ce" containerName="adoption" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.691645 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc4aafd4-d2a2-43c5-8bfc-255c4f9643e1" containerName="collect-profiles" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.693978 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.722846 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vz65b"] Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.782251 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5a07d99-8c81-4356-86f0-b46c64547843-utilities\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.782408 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5a07d99-8c81-4356-86f0-b46c64547843-catalog-content\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.782540 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrwz8\" (UniqueName: \"kubernetes.io/projected/c5a07d99-8c81-4356-86f0-b46c64547843-kube-api-access-qrwz8\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.883768 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5a07d99-8c81-4356-86f0-b46c64547843-utilities\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.883871 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5a07d99-8c81-4356-86f0-b46c64547843-catalog-content\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.883992 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrwz8\" (UniqueName: \"kubernetes.io/projected/c5a07d99-8c81-4356-86f0-b46c64547843-kube-api-access-qrwz8\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.884262 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c5a07d99-8c81-4356-86f0-b46c64547843-utilities\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.884525 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c5a07d99-8c81-4356-86f0-b46c64547843-catalog-content\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:42 crc kubenswrapper[5010]: I1126 18:15:42.905518 5010 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qrwz8\" (UniqueName: \"kubernetes.io/projected/c5a07d99-8c81-4356-86f0-b46c64547843-kube-api-access-qrwz8\") pod \"certified-operators-vz65b\" (UID: \"c5a07d99-8c81-4356-86f0-b46c64547843\") " pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:43 crc kubenswrapper[5010]: I1126 18:15:43.021068 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:43 crc kubenswrapper[5010]: I1126 18:15:43.382947 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vz65b"] Nov 26 18:15:44 crc kubenswrapper[5010]: I1126 18:15:44.384222 5010 generic.go:334] "Generic (PLEG): container finished" podID="c5a07d99-8c81-4356-86f0-b46c64547843" containerID="94bd9b5ff0080ecdc399e6cc1b910b725ae5cd6b3d5ef50240438c7e3c21f4b6" exitCode=0 Nov 26 18:15:44 crc kubenswrapper[5010]: I1126 18:15:44.384488 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65b" event={"ID":"c5a07d99-8c81-4356-86f0-b46c64547843","Type":"ContainerDied","Data":"94bd9b5ff0080ecdc399e6cc1b910b725ae5cd6b3d5ef50240438c7e3c21f4b6"} Nov 26 18:15:44 crc kubenswrapper[5010]: I1126 18:15:44.384514 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65b" event={"ID":"c5a07d99-8c81-4356-86f0-b46c64547843","Type":"ContainerStarted","Data":"ecec7f79e158ae22219c559f5849e4d8b26b1dbc7c86177c598ad0d3b2c1634f"} Nov 26 18:15:50 crc kubenswrapper[5010]: I1126 18:15:50.456408 5010 generic.go:334] "Generic (PLEG): container finished" podID="c5a07d99-8c81-4356-86f0-b46c64547843" containerID="15ee70e010b6cae4d5bcb957f9003ad14cf2c7e1fc7d1c7916eae69c8d0e8f47" exitCode=0 Nov 26 18:15:50 crc kubenswrapper[5010]: I1126 18:15:50.456572 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65b" event={"ID":"c5a07d99-8c81-4356-86f0-b46c64547843","Type":"ContainerDied","Data":"15ee70e010b6cae4d5bcb957f9003ad14cf2c7e1fc7d1c7916eae69c8d0e8f47"} Nov 26 18:15:51 crc kubenswrapper[5010]: I1126 18:15:51.469221 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-vz65b" event={"ID":"c5a07d99-8c81-4356-86f0-b46c64547843","Type":"ContainerStarted","Data":"f7619a90a4d1ea1f45e7f434b0ce9792dd469a3f6f7da4b84891400406dec750"} Nov 26 18:15:51 crc kubenswrapper[5010]: I1126 18:15:51.498940 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-vz65b" podStartSLOduration=2.897344662 podStartE2EDuration="9.498919643s" podCreationTimestamp="2025-11-26 18:15:42 +0000 UTC" firstStartedPulling="2025-11-26 18:15:44.386867738 +0000 UTC m=+10165.177584886" lastFinishedPulling="2025-11-26 18:15:50.988442709 +0000 UTC m=+10171.779159867" observedRunningTime="2025-11-26 18:15:51.492823902 +0000 UTC m=+10172.283541090" watchObservedRunningTime="2025-11-26 18:15:51.498919643 +0000 UTC m=+10172.289636781" Nov 26 18:15:53 crc kubenswrapper[5010]: I1126 18:15:53.023228 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:53 crc kubenswrapper[5010]: I1126 18:15:53.023891 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:15:53 crc kubenswrapper[5010]: I1126 18:15:53.134595 
5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.098159 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-vz65b" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.197814 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-vz65b"] Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.241944 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.242199 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-c5tp8" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="registry-server" containerID="cri-o://288aae808187f79aa64c666ecf468e66f878c5a7eafd47c2c0e8d88be5c363c5" gracePeriod=2 Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.622172 5010 generic.go:334] "Generic (PLEG): container finished" podID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerID="288aae808187f79aa64c666ecf468e66f878c5a7eafd47c2c0e8d88be5c363c5" exitCode=0 Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.622246 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5tp8" event={"ID":"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff","Type":"ContainerDied","Data":"288aae808187f79aa64c666ecf468e66f878c5a7eafd47c2c0e8d88be5c363c5"} Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.718513 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.835488 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdzk4\" (UniqueName: \"kubernetes.io/projected/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-kube-api-access-jdzk4\") pod \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.835702 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-catalog-content\") pod \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.835805 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-utilities\") pod \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\" (UID: \"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff\") " Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.836491 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-utilities" (OuterVolumeSpecName: "utilities") pod "60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" (UID: "60bcf0c1-e521-4815-9f8a-da9f5e6bcdff"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.836840 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.843012 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-kube-api-access-jdzk4" (OuterVolumeSpecName: "kube-api-access-jdzk4") pod "60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" (UID: "60bcf0c1-e521-4815-9f8a-da9f5e6bcdff"). InnerVolumeSpecName "kube-api-access-jdzk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.882989 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" (UID: "60bcf0c1-e521-4815-9f8a-da9f5e6bcdff"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.939249 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdzk4\" (UniqueName: \"kubernetes.io/projected/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-kube-api-access-jdzk4\") on node \"crc\" DevicePath \"\"" Nov 26 18:16:03 crc kubenswrapper[5010]: I1126 18:16:03.939277 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.633010 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-c5tp8" event={"ID":"60bcf0c1-e521-4815-9f8a-da9f5e6bcdff","Type":"ContainerDied","Data":"e97c4bea1fa10a2b8ff945f8d29bad8ad30df27667e054fef19636dd1d2b668a"} Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.633365 5010 scope.go:117] "RemoveContainer" containerID="288aae808187f79aa64c666ecf468e66f878c5a7eafd47c2c0e8d88be5c363c5" Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.633246 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-c5tp8" Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.656748 5010 scope.go:117] "RemoveContainer" containerID="bfd2f125e4bdbd183239e3310fb05999eef199d051a32d967dba998a113c90fc" Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.658621 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.673470 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-c5tp8"] Nov 26 18:16:04 crc kubenswrapper[5010]: I1126 18:16:04.682195 5010 scope.go:117] "RemoveContainer" containerID="f87fea71da75ffcabe23d2b86bb2e2a077393fc3b17b9e48e9795066eb3dcbab" Nov 26 18:16:05 crc kubenswrapper[5010]: I1126 18:16:05.906542 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" path="/var/lib/kubelet/pods/60bcf0c1-e521-4815-9f8a-da9f5e6bcdff/volumes" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.561631 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-b75rl"] Nov 26 18:16:09 crc kubenswrapper[5010]: E1126 18:16:09.562784 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="extract-content" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.562800 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="extract-content" Nov 26 18:16:09 crc kubenswrapper[5010]: E1126 18:16:09.562834 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="extract-utilities" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.562842 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="extract-utilities" Nov 26 18:16:09 crc kubenswrapper[5010]: E1126 18:16:09.562863 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="registry-server" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.562870 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="registry-server" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.563147 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="60bcf0c1-e521-4815-9f8a-da9f5e6bcdff" containerName="registry-server" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.565267 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.604498 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b75rl"] Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.675229 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-utilities\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.675331 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qb2ww\" (UniqueName: \"kubernetes.io/projected/6cce37b6-1950-43a6-b20f-127fbbd67cdc-kube-api-access-qb2ww\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.675450 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-catalog-content\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.777153 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-catalog-content\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.777310 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-utilities\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.777375 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qb2ww\" (UniqueName: \"kubernetes.io/projected/6cce37b6-1950-43a6-b20f-127fbbd67cdc-kube-api-access-qb2ww\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.778243 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-catalog-content\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.778554 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-utilities\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.822788 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-qb2ww\" (UniqueName: \"kubernetes.io/projected/6cce37b6-1950-43a6-b20f-127fbbd67cdc-kube-api-access-qb2ww\") pod \"redhat-operators-b75rl\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:09 crc kubenswrapper[5010]: I1126 18:16:09.899186 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:10 crc kubenswrapper[5010]: I1126 18:16:10.466422 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-b75rl"] Nov 26 18:16:10 crc kubenswrapper[5010]: I1126 18:16:10.724227 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerStarted","Data":"3984cd3d840d43bed774f0408bea7dfce7c850d6c3c3c4112be19e83c701c350"} Nov 26 18:16:11 crc kubenswrapper[5010]: I1126 18:16:11.743030 5010 generic.go:334] "Generic (PLEG): container finished" podID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerID="c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0" exitCode=0 Nov 26 18:16:11 crc kubenswrapper[5010]: I1126 18:16:11.743100 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerDied","Data":"c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0"} Nov 26 18:16:13 crc kubenswrapper[5010]: I1126 18:16:13.771207 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerStarted","Data":"1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea"} Nov 26 18:16:16 crc kubenswrapper[5010]: I1126 18:16:16.868014 5010 generic.go:334] "Generic (PLEG): container finished" podID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerID="1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea" exitCode=0 Nov 26 18:16:16 crc kubenswrapper[5010]: I1126 18:16:16.868120 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerDied","Data":"1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea"} Nov 26 18:16:19 crc kubenswrapper[5010]: I1126 18:16:19.948170 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerStarted","Data":"05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188"} Nov 26 18:16:19 crc kubenswrapper[5010]: I1126 18:16:19.984219 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-b75rl" podStartSLOduration=3.967058041 podStartE2EDuration="10.984192048s" podCreationTimestamp="2025-11-26 18:16:09 +0000 UTC" firstStartedPulling="2025-11-26 18:16:11.747279991 +0000 UTC m=+10192.537997149" lastFinishedPulling="2025-11-26 18:16:18.764413968 +0000 UTC m=+10199.555131156" observedRunningTime="2025-11-26 18:16:19.981932892 +0000 UTC m=+10200.772650070" watchObservedRunningTime="2025-11-26 18:16:19.984192048 +0000 UTC m=+10200.774909226" Nov 26 18:16:29 crc kubenswrapper[5010]: I1126 18:16:29.916882 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-b75rl" Nov 
26 18:16:29 crc kubenswrapper[5010]: I1126 18:16:29.917604 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:29 crc kubenswrapper[5010]: I1126 18:16:29.994444 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:30 crc kubenswrapper[5010]: I1126 18:16:30.155532 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:30 crc kubenswrapper[5010]: I1126 18:16:30.255751 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b75rl"] Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.128583 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-b75rl" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="registry-server" containerID="cri-o://05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188" gracePeriod=2 Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.692620 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.761412 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-utilities\") pod \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.761606 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qb2ww\" (UniqueName: \"kubernetes.io/projected/6cce37b6-1950-43a6-b20f-127fbbd67cdc-kube-api-access-qb2ww\") pod \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.761763 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-catalog-content\") pod \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\" (UID: \"6cce37b6-1950-43a6-b20f-127fbbd67cdc\") " Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.764056 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-utilities" (OuterVolumeSpecName: "utilities") pod "6cce37b6-1950-43a6-b20f-127fbbd67cdc" (UID: "6cce37b6-1950-43a6-b20f-127fbbd67cdc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.780752 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6cce37b6-1950-43a6-b20f-127fbbd67cdc-kube-api-access-qb2ww" (OuterVolumeSpecName: "kube-api-access-qb2ww") pod "6cce37b6-1950-43a6-b20f-127fbbd67cdc" (UID: "6cce37b6-1950-43a6-b20f-127fbbd67cdc"). InnerVolumeSpecName "kube-api-access-qb2ww". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.864504 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qb2ww\" (UniqueName: \"kubernetes.io/projected/6cce37b6-1950-43a6-b20f-127fbbd67cdc-kube-api-access-qb2ww\") on node \"crc\" DevicePath \"\"" Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.864536 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.877862 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6cce37b6-1950-43a6-b20f-127fbbd67cdc" (UID: "6cce37b6-1950-43a6-b20f-127fbbd67cdc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:16:32 crc kubenswrapper[5010]: I1126 18:16:32.965341 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6cce37b6-1950-43a6-b20f-127fbbd67cdc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.140352 5010 generic.go:334] "Generic (PLEG): container finished" podID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerID="05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188" exitCode=0 Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.140406 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerDied","Data":"05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188"} Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.140471 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-b75rl" event={"ID":"6cce37b6-1950-43a6-b20f-127fbbd67cdc","Type":"ContainerDied","Data":"3984cd3d840d43bed774f0408bea7dfce7c850d6c3c3c4112be19e83c701c350"} Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.140496 5010 scope.go:117] "RemoveContainer" containerID="05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.140510 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-b75rl" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.170052 5010 scope.go:117] "RemoveContainer" containerID="1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.201065 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-b75rl"] Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.215364 5010 scope.go:117] "RemoveContainer" containerID="c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.220583 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-b75rl"] Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.254766 5010 scope.go:117] "RemoveContainer" containerID="05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188" Nov 26 18:16:33 crc kubenswrapper[5010]: E1126 18:16:33.258453 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188\": container with ID starting with 05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188 not found: ID does not exist" containerID="05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.258507 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188"} err="failed to get container status \"05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188\": rpc error: code = NotFound desc = could not find container \"05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188\": container with ID starting with 05a36fc8c1ebb56611ffc1f301d3d6e9873e8ecc91bb280c2ba221c43d819188 not found: ID does not exist" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.258546 5010 scope.go:117] "RemoveContainer" containerID="1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea" Nov 26 18:16:33 crc kubenswrapper[5010]: E1126 18:16:33.259024 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea\": container with ID starting with 1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea not found: ID does not exist" containerID="1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.259074 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea"} err="failed to get container status \"1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea\": rpc error: code = NotFound desc = could not find container \"1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea\": container with ID starting with 1c713366ac13da64241f1297ded577d9bac667f90b981b7df939c2c40efe3eea not found: ID does not exist" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.259109 5010 scope.go:117] "RemoveContainer" containerID="c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0" Nov 26 18:16:33 crc kubenswrapper[5010]: E1126 18:16:33.259706 5010 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0\": container with ID starting with c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0 not found: ID does not exist" containerID="c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.259783 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0"} err="failed to get container status \"c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0\": rpc error: code = NotFound desc = could not find container \"c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0\": container with ID starting with c95e9486618d21250368e74e3b63db9ff8a41868f849c4fcc0edbd2bc5c795d0 not found: ID does not exist" Nov 26 18:16:33 crc kubenswrapper[5010]: I1126 18:16:33.904503 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" path="/var/lib/kubelet/pods/6cce37b6-1950-43a6-b20f-127fbbd67cdc/volumes" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.251502 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-djwnc/must-gather-rxfw4"] Nov 26 18:16:50 crc kubenswrapper[5010]: E1126 18:16:50.252380 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="extract-utilities" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.252398 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="extract-utilities" Nov 26 18:16:50 crc kubenswrapper[5010]: E1126 18:16:50.252423 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="extract-content" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.252430 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="extract-content" Nov 26 18:16:50 crc kubenswrapper[5010]: E1126 18:16:50.252455 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="registry-server" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.252461 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="registry-server" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.252682 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="6cce37b6-1950-43a6-b20f-127fbbd67cdc" containerName="registry-server" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.253942 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.255972 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-djwnc"/"default-dockercfg-x7gnf" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.256467 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-djwnc"/"openshift-service-ca.crt" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.256749 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-djwnc"/"kube-root-ca.crt" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.263483 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-djwnc/must-gather-rxfw4"] Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.346291 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/70abf141-daf8-4b48-90c5-534d8de204ed-must-gather-output\") pod \"must-gather-rxfw4\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.346366 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xr9cg\" (UniqueName: \"kubernetes.io/projected/70abf141-daf8-4b48-90c5-534d8de204ed-kube-api-access-xr9cg\") pod \"must-gather-rxfw4\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.448173 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/70abf141-daf8-4b48-90c5-534d8de204ed-must-gather-output\") pod \"must-gather-rxfw4\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.448228 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xr9cg\" (UniqueName: \"kubernetes.io/projected/70abf141-daf8-4b48-90c5-534d8de204ed-kube-api-access-xr9cg\") pod \"must-gather-rxfw4\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.448652 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/70abf141-daf8-4b48-90c5-534d8de204ed-must-gather-output\") pod \"must-gather-rxfw4\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.473390 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xr9cg\" (UniqueName: \"kubernetes.io/projected/70abf141-daf8-4b48-90c5-534d8de204ed-kube-api-access-xr9cg\") pod \"must-gather-rxfw4\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:50 crc kubenswrapper[5010]: I1126 18:16:50.572348 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:16:51 crc kubenswrapper[5010]: I1126 18:16:51.067928 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-djwnc/must-gather-rxfw4"] Nov 26 18:16:51 crc kubenswrapper[5010]: I1126 18:16:51.370799 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/must-gather-rxfw4" event={"ID":"70abf141-daf8-4b48-90c5-534d8de204ed","Type":"ContainerStarted","Data":"8f442c40b46db1d20bfd30adb317d734095939022e5491a7fabcc384fddd2b98"} Nov 26 18:16:56 crc kubenswrapper[5010]: I1126 18:16:56.431664 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/must-gather-rxfw4" event={"ID":"70abf141-daf8-4b48-90c5-534d8de204ed","Type":"ContainerStarted","Data":"e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1"} Nov 26 18:16:57 crc kubenswrapper[5010]: I1126 18:16:57.445967 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/must-gather-rxfw4" event={"ID":"70abf141-daf8-4b48-90c5-534d8de204ed","Type":"ContainerStarted","Data":"92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59"} Nov 26 18:16:57 crc kubenswrapper[5010]: I1126 18:16:57.476447 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-djwnc/must-gather-rxfw4" podStartSLOduration=2.830741026 podStartE2EDuration="7.476426465s" podCreationTimestamp="2025-11-26 18:16:50 +0000 UTC" firstStartedPulling="2025-11-26 18:16:51.058094897 +0000 UTC m=+10231.848812045" lastFinishedPulling="2025-11-26 18:16:55.703780316 +0000 UTC m=+10236.494497484" observedRunningTime="2025-11-26 18:16:57.461768861 +0000 UTC m=+10238.252486059" watchObservedRunningTime="2025-11-26 18:16:57.476426465 +0000 UTC m=+10238.267143623" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.155350 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-djwnc/crc-debug-ztg7p"] Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.157058 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.293990 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8120d3e3-4892-409d-a576-78ef194f3caa-host\") pod \"crc-debug-ztg7p\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.294546 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7565n\" (UniqueName: \"kubernetes.io/projected/8120d3e3-4892-409d-a576-78ef194f3caa-kube-api-access-7565n\") pod \"crc-debug-ztg7p\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.398019 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7565n\" (UniqueName: \"kubernetes.io/projected/8120d3e3-4892-409d-a576-78ef194f3caa-kube-api-access-7565n\") pod \"crc-debug-ztg7p\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.398552 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8120d3e3-4892-409d-a576-78ef194f3caa-host\") pod \"crc-debug-ztg7p\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.398765 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8120d3e3-4892-409d-a576-78ef194f3caa-host\") pod \"crc-debug-ztg7p\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.428603 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7565n\" (UniqueName: \"kubernetes.io/projected/8120d3e3-4892-409d-a576-78ef194f3caa-kube-api-access-7565n\") pod \"crc-debug-ztg7p\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:01 crc kubenswrapper[5010]: I1126 18:17:01.479551 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:02 crc kubenswrapper[5010]: I1126 18:17:02.513128 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" event={"ID":"8120d3e3-4892-409d-a576-78ef194f3caa","Type":"ContainerStarted","Data":"c15d3ef39296b4ceb121b3575db1af3c3029aff55e9b4044dd076210154dfb29"} Nov 26 18:17:13 crc kubenswrapper[5010]: I1126 18:17:13.629988 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" event={"ID":"8120d3e3-4892-409d-a576-78ef194f3caa","Type":"ContainerStarted","Data":"c38f97a7b1ce224321699aac83c10cd815a4fae9f735a6ae670d49a0d72b5cf7"} Nov 26 18:17:13 crc kubenswrapper[5010]: I1126 18:17:13.658155 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" podStartSLOduration=1.647037107 podStartE2EDuration="12.658098539s" podCreationTimestamp="2025-11-26 18:17:01 +0000 UTC" firstStartedPulling="2025-11-26 18:17:01.536037841 +0000 UTC m=+10242.326755009" lastFinishedPulling="2025-11-26 18:17:12.547099293 +0000 UTC m=+10253.337816441" observedRunningTime="2025-11-26 18:17:13.645441945 +0000 UTC m=+10254.436159123" watchObservedRunningTime="2025-11-26 18:17:13.658098539 +0000 UTC m=+10254.448815697" Nov 26 18:17:41 crc kubenswrapper[5010]: I1126 18:17:41.422528 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:17:41 crc kubenswrapper[5010]: I1126 18:17:41.422940 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:17:58 crc kubenswrapper[5010]: E1126 18:17:58.023493 5010 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8120d3e3_4892_409d_a576_78ef194f3caa.slice/crio-conmon-c38f97a7b1ce224321699aac83c10cd815a4fae9f735a6ae670d49a0d72b5cf7.scope\": RecentStats: unable to find data in memory cache]" Nov 26 18:17:58 crc kubenswrapper[5010]: I1126 18:17:58.088869 5010 generic.go:334] "Generic (PLEG): container finished" podID="8120d3e3-4892-409d-a576-78ef194f3caa" containerID="c38f97a7b1ce224321699aac83c10cd815a4fae9f735a6ae670d49a0d72b5cf7" exitCode=0 Nov 26 18:17:58 crc kubenswrapper[5010]: I1126 18:17:58.088917 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" event={"ID":"8120d3e3-4892-409d-a576-78ef194f3caa","Type":"ContainerDied","Data":"c38f97a7b1ce224321699aac83c10cd815a4fae9f735a6ae670d49a0d72b5cf7"} Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.240900 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.272524 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-djwnc/crc-debug-ztg7p"] Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.283525 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-djwnc/crc-debug-ztg7p"] Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.335025 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8120d3e3-4892-409d-a576-78ef194f3caa-host\") pod \"8120d3e3-4892-409d-a576-78ef194f3caa\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.335190 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7565n\" (UniqueName: \"kubernetes.io/projected/8120d3e3-4892-409d-a576-78ef194f3caa-kube-api-access-7565n\") pod \"8120d3e3-4892-409d-a576-78ef194f3caa\" (UID: \"8120d3e3-4892-409d-a576-78ef194f3caa\") " Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.335355 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8120d3e3-4892-409d-a576-78ef194f3caa-host" (OuterVolumeSpecName: "host") pod "8120d3e3-4892-409d-a576-78ef194f3caa" (UID: "8120d3e3-4892-409d-a576-78ef194f3caa"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.336844 5010 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8120d3e3-4892-409d-a576-78ef194f3caa-host\") on node \"crc\" DevicePath \"\"" Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.343226 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8120d3e3-4892-409d-a576-78ef194f3caa-kube-api-access-7565n" (OuterVolumeSpecName: "kube-api-access-7565n") pod "8120d3e3-4892-409d-a576-78ef194f3caa" (UID: "8120d3e3-4892-409d-a576-78ef194f3caa"). InnerVolumeSpecName "kube-api-access-7565n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.439319 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7565n\" (UniqueName: \"kubernetes.io/projected/8120d3e3-4892-409d-a576-78ef194f3caa-kube-api-access-7565n\") on node \"crc\" DevicePath \"\"" Nov 26 18:17:59 crc kubenswrapper[5010]: I1126 18:17:59.908095 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8120d3e3-4892-409d-a576-78ef194f3caa" path="/var/lib/kubelet/pods/8120d3e3-4892-409d-a576-78ef194f3caa/volumes" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.114149 5010 scope.go:117] "RemoveContainer" containerID="c38f97a7b1ce224321699aac83c10cd815a4fae9f735a6ae670d49a0d72b5cf7" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.114195 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-ztg7p" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.507476 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-djwnc/crc-debug-sqc6w"] Nov 26 18:18:00 crc kubenswrapper[5010]: E1126 18:18:00.507933 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8120d3e3-4892-409d-a576-78ef194f3caa" containerName="container-00" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.507945 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="8120d3e3-4892-409d-a576-78ef194f3caa" containerName="container-00" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.508135 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="8120d3e3-4892-409d-a576-78ef194f3caa" containerName="container-00" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.508880 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.577457 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnr4m\" (UniqueName: \"kubernetes.io/projected/5a1c1e92-d7ad-4186-91d0-9289605b8470-kube-api-access-gnr4m\") pod \"crc-debug-sqc6w\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.577642 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a1c1e92-d7ad-4186-91d0-9289605b8470-host\") pod \"crc-debug-sqc6w\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.679331 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a1c1e92-d7ad-4186-91d0-9289605b8470-host\") pod \"crc-debug-sqc6w\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.679478 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a1c1e92-d7ad-4186-91d0-9289605b8470-host\") pod \"crc-debug-sqc6w\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:00 crc kubenswrapper[5010]: I1126 18:18:00.679493 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnr4m\" (UniqueName: \"kubernetes.io/projected/5a1c1e92-d7ad-4186-91d0-9289605b8470-kube-api-access-gnr4m\") pod \"crc-debug-sqc6w\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:01 crc kubenswrapper[5010]: I1126 18:18:01.507584 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnr4m\" (UniqueName: \"kubernetes.io/projected/5a1c1e92-d7ad-4186-91d0-9289605b8470-kube-api-access-gnr4m\") pod \"crc-debug-sqc6w\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:01 crc kubenswrapper[5010]: I1126 18:18:01.730480 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:02 crc kubenswrapper[5010]: I1126 18:18:02.179782 5010 generic.go:334] "Generic (PLEG): container finished" podID="5a1c1e92-d7ad-4186-91d0-9289605b8470" containerID="fc0af1701681bf6140cc2b14c620f28d3d2d459b5dc688b96bc52f2825a44a0c" exitCode=0 Nov 26 18:18:02 crc kubenswrapper[5010]: I1126 18:18:02.179854 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-sqc6w" event={"ID":"5a1c1e92-d7ad-4186-91d0-9289605b8470","Type":"ContainerDied","Data":"fc0af1701681bf6140cc2b14c620f28d3d2d459b5dc688b96bc52f2825a44a0c"} Nov 26 18:18:02 crc kubenswrapper[5010]: I1126 18:18:02.180170 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-sqc6w" event={"ID":"5a1c1e92-d7ad-4186-91d0-9289605b8470","Type":"ContainerStarted","Data":"1bde8c15e0b9076ea25fc04a1e3d5dcaa3beff77201879d0aeb76909c50cd57d"} Nov 26 18:18:02 crc kubenswrapper[5010]: I1126 18:18:02.615920 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-djwnc/crc-debug-sqc6w"] Nov 26 18:18:02 crc kubenswrapper[5010]: I1126 18:18:02.627018 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-djwnc/crc-debug-sqc6w"] Nov 26 18:18:03 crc kubenswrapper[5010]: I1126 18:18:03.887121 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-djwnc/crc-debug-4xbjl"] Nov 26 18:18:03 crc kubenswrapper[5010]: E1126 18:18:03.888581 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a1c1e92-d7ad-4186-91d0-9289605b8470" containerName="container-00" Nov 26 18:18:03 crc kubenswrapper[5010]: I1126 18:18:03.888661 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a1c1e92-d7ad-4186-91d0-9289605b8470" containerName="container-00" Nov 26 18:18:03 crc kubenswrapper[5010]: I1126 18:18:03.888931 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a1c1e92-d7ad-4186-91d0-9289605b8470" containerName="container-00" Nov 26 18:18:03 crc kubenswrapper[5010]: I1126 18:18:03.889830 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:03 crc kubenswrapper[5010]: I1126 18:18:03.967440 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9brc\" (UniqueName: \"kubernetes.io/projected/d1d6dbe5-8328-42cf-878e-ad70b3144799-kube-api-access-s9brc\") pod \"crc-debug-4xbjl\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:03 crc kubenswrapper[5010]: I1126 18:18:03.967500 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d1d6dbe5-8328-42cf-878e-ad70b3144799-host\") pod \"crc-debug-4xbjl\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.046673 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.069036 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a1c1e92-d7ad-4186-91d0-9289605b8470-host\") pod \"5a1c1e92-d7ad-4186-91d0-9289605b8470\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.069140 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5a1c1e92-d7ad-4186-91d0-9289605b8470-host" (OuterVolumeSpecName: "host") pod "5a1c1e92-d7ad-4186-91d0-9289605b8470" (UID: "5a1c1e92-d7ad-4186-91d0-9289605b8470"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.069387 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnr4m\" (UniqueName: \"kubernetes.io/projected/5a1c1e92-d7ad-4186-91d0-9289605b8470-kube-api-access-gnr4m\") pod \"5a1c1e92-d7ad-4186-91d0-9289605b8470\" (UID: \"5a1c1e92-d7ad-4186-91d0-9289605b8470\") " Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.069951 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9brc\" (UniqueName: \"kubernetes.io/projected/d1d6dbe5-8328-42cf-878e-ad70b3144799-kube-api-access-s9brc\") pod \"crc-debug-4xbjl\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.070000 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d1d6dbe5-8328-42cf-878e-ad70b3144799-host\") pod \"crc-debug-4xbjl\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.070102 5010 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a1c1e92-d7ad-4186-91d0-9289605b8470-host\") on node \"crc\" DevicePath \"\"" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.070170 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d1d6dbe5-8328-42cf-878e-ad70b3144799-host\") pod \"crc-debug-4xbjl\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.079850 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a1c1e92-d7ad-4186-91d0-9289605b8470-kube-api-access-gnr4m" (OuterVolumeSpecName: "kube-api-access-gnr4m") pod "5a1c1e92-d7ad-4186-91d0-9289605b8470" (UID: "5a1c1e92-d7ad-4186-91d0-9289605b8470"). InnerVolumeSpecName "kube-api-access-gnr4m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.085903 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9brc\" (UniqueName: \"kubernetes.io/projected/d1d6dbe5-8328-42cf-878e-ad70b3144799-kube-api-access-s9brc\") pod \"crc-debug-4xbjl\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.172907 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnr4m\" (UniqueName: \"kubernetes.io/projected/5a1c1e92-d7ad-4186-91d0-9289605b8470-kube-api-access-gnr4m\") on node \"crc\" DevicePath \"\"" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.202544 5010 scope.go:117] "RemoveContainer" containerID="fc0af1701681bf6140cc2b14c620f28d3d2d459b5dc688b96bc52f2825a44a0c" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.202615 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-sqc6w" Nov 26 18:18:04 crc kubenswrapper[5010]: I1126 18:18:04.359607 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:04 crc kubenswrapper[5010]: W1126 18:18:04.401991 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd1d6dbe5_8328_42cf_878e_ad70b3144799.slice/crio-3ca7bad7a49a498224a9fe842ba6cc9dc9fe42965616d72dedf957a76024dbe5 WatchSource:0}: Error finding container 3ca7bad7a49a498224a9fe842ba6cc9dc9fe42965616d72dedf957a76024dbe5: Status 404 returned error can't find the container with id 3ca7bad7a49a498224a9fe842ba6cc9dc9fe42965616d72dedf957a76024dbe5 Nov 26 18:18:05 crc kubenswrapper[5010]: I1126 18:18:05.216627 5010 generic.go:334] "Generic (PLEG): container finished" podID="d1d6dbe5-8328-42cf-878e-ad70b3144799" containerID="ddf0b203dd99a986c773b72baa1baf0fe61f5576e2c21426d3614a376c6a5870" exitCode=0 Nov 26 18:18:05 crc kubenswrapper[5010]: I1126 18:18:05.216979 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-4xbjl" event={"ID":"d1d6dbe5-8328-42cf-878e-ad70b3144799","Type":"ContainerDied","Data":"ddf0b203dd99a986c773b72baa1baf0fe61f5576e2c21426d3614a376c6a5870"} Nov 26 18:18:05 crc kubenswrapper[5010]: I1126 18:18:05.217090 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/crc-debug-4xbjl" event={"ID":"d1d6dbe5-8328-42cf-878e-ad70b3144799","Type":"ContainerStarted","Data":"3ca7bad7a49a498224a9fe842ba6cc9dc9fe42965616d72dedf957a76024dbe5"} Nov 26 18:18:05 crc kubenswrapper[5010]: I1126 18:18:05.257479 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-djwnc/crc-debug-4xbjl"] Nov 26 18:18:05 crc kubenswrapper[5010]: I1126 18:18:05.267557 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-djwnc/crc-debug-4xbjl"] Nov 26 18:18:05 crc kubenswrapper[5010]: I1126 18:18:05.906341 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a1c1e92-d7ad-4186-91d0-9289605b8470" path="/var/lib/kubelet/pods/5a1c1e92-d7ad-4186-91d0-9289605b8470/volumes" Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.342076 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.420458 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9brc\" (UniqueName: \"kubernetes.io/projected/d1d6dbe5-8328-42cf-878e-ad70b3144799-kube-api-access-s9brc\") pod \"d1d6dbe5-8328-42cf-878e-ad70b3144799\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.420742 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d1d6dbe5-8328-42cf-878e-ad70b3144799-host\") pod \"d1d6dbe5-8328-42cf-878e-ad70b3144799\" (UID: \"d1d6dbe5-8328-42cf-878e-ad70b3144799\") " Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.420863 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d1d6dbe5-8328-42cf-878e-ad70b3144799-host" (OuterVolumeSpecName: "host") pod "d1d6dbe5-8328-42cf-878e-ad70b3144799" (UID: "d1d6dbe5-8328-42cf-878e-ad70b3144799"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.421461 5010 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d1d6dbe5-8328-42cf-878e-ad70b3144799-host\") on node \"crc\" DevicePath \"\"" Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.427126 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1d6dbe5-8328-42cf-878e-ad70b3144799-kube-api-access-s9brc" (OuterVolumeSpecName: "kube-api-access-s9brc") pod "d1d6dbe5-8328-42cf-878e-ad70b3144799" (UID: "d1d6dbe5-8328-42cf-878e-ad70b3144799"). InnerVolumeSpecName "kube-api-access-s9brc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:18:06 crc kubenswrapper[5010]: I1126 18:18:06.523214 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9brc\" (UniqueName: \"kubernetes.io/projected/d1d6dbe5-8328-42cf-878e-ad70b3144799-kube-api-access-s9brc\") on node \"crc\" DevicePath \"\"" Nov 26 18:18:07 crc kubenswrapper[5010]: I1126 18:18:07.245593 5010 scope.go:117] "RemoveContainer" containerID="ddf0b203dd99a986c773b72baa1baf0fe61f5576e2c21426d3614a376c6a5870" Nov 26 18:18:07 crc kubenswrapper[5010]: I1126 18:18:07.245652 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/crc-debug-4xbjl" Nov 26 18:18:07 crc kubenswrapper[5010]: I1126 18:18:07.915215 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1d6dbe5-8328-42cf-878e-ad70b3144799" path="/var/lib/kubelet/pods/d1d6dbe5-8328-42cf-878e-ad70b3144799/volumes" Nov 26 18:18:11 crc kubenswrapper[5010]: I1126 18:18:11.423038 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:18:11 crc kubenswrapper[5010]: I1126 18:18:11.424290 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.422682 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.423661 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.423794 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.425134 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"67fa82ceefb0a9927b9de7df801e74f94dbff907b23d1a7863fb504d3662687f"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.425270 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://67fa82ceefb0a9927b9de7df801e74f94dbff907b23d1a7863fb504d3662687f" gracePeriod=600 Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.739678 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="67fa82ceefb0a9927b9de7df801e74f94dbff907b23d1a7863fb504d3662687f" exitCode=0 Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.739741 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"67fa82ceefb0a9927b9de7df801e74f94dbff907b23d1a7863fb504d3662687f"} Nov 26 18:18:41 crc kubenswrapper[5010]: I1126 18:18:41.740082 5010 scope.go:117] "RemoveContainer" 
containerID="838da65f2970474e1748b034541b526861b8b0656296a66aa4542cea2a31629e" Nov 26 18:18:42 crc kubenswrapper[5010]: I1126 18:18:42.763892 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5"} Nov 26 18:20:14 crc kubenswrapper[5010]: I1126 18:20:14.461431 5010 scope.go:117] "RemoveContainer" containerID="bd41edbdea298d31e912c66333baa31406d8e7c84a84aa2957f8f14ebfcd877e" Nov 26 18:20:14 crc kubenswrapper[5010]: I1126 18:20:14.501538 5010 scope.go:117] "RemoveContainer" containerID="569eae260444e6316a747dc345df54d378b71424e800d4b2830ddc288dca6ba1" Nov 26 18:20:41 crc kubenswrapper[5010]: I1126 18:20:41.424511 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:20:41 crc kubenswrapper[5010]: I1126 18:20:41.425046 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:21:11 crc kubenswrapper[5010]: I1126 18:21:11.422557 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:21:11 crc kubenswrapper[5010]: I1126 18:21:11.423235 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:21:14 crc kubenswrapper[5010]: I1126 18:21:14.606278 5010 scope.go:117] "RemoveContainer" containerID="2ab251ed9b811085915ca27fa93f6435e18ea1e3f016b4f85a6a8b94ab83c357" Nov 26 18:21:41 crc kubenswrapper[5010]: I1126 18:21:41.422940 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:21:41 crc kubenswrapper[5010]: I1126 18:21:41.423794 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:21:41 crc kubenswrapper[5010]: I1126 18:21:41.423856 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 18:21:41 crc kubenswrapper[5010]: I1126 18:21:41.424977 5010 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 18:21:41 crc kubenswrapper[5010]: I1126 18:21:41.425069 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" gracePeriod=600 Nov 26 18:21:41 crc kubenswrapper[5010]: E1126 18:21:41.560273 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:21:42 crc kubenswrapper[5010]: I1126 18:21:42.094605 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" exitCode=0 Nov 26 18:21:42 crc kubenswrapper[5010]: I1126 18:21:42.094658 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5"} Nov 26 18:21:42 crc kubenswrapper[5010]: I1126 18:21:42.094697 5010 scope.go:117] "RemoveContainer" containerID="67fa82ceefb0a9927b9de7df801e74f94dbff907b23d1a7863fb504d3662687f" Nov 26 18:21:42 crc kubenswrapper[5010]: I1126 18:21:42.095523 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:21:42 crc kubenswrapper[5010]: E1126 18:21:42.095921 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:21:52 crc kubenswrapper[5010]: I1126 18:21:52.893085 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:21:52 crc kubenswrapper[5010]: E1126 18:21:52.894193 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:22:05 crc kubenswrapper[5010]: I1126 18:22:05.892487 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:22:05 crc kubenswrapper[5010]: E1126 18:22:05.893784 5010 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:22:17 crc kubenswrapper[5010]: I1126 18:22:17.893238 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:22:17 crc kubenswrapper[5010]: E1126 18:22:17.894534 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:22:30 crc kubenswrapper[5010]: I1126 18:22:30.724313 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_81478787-2999-4b71-94f6-b4e1c2618f2a/init-config-reloader/0.log" Nov 26 18:22:30 crc kubenswrapper[5010]: I1126 18:22:30.892214 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:22:30 crc kubenswrapper[5010]: E1126 18:22:30.892522 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.108190 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_81478787-2999-4b71-94f6-b4e1c2618f2a/init-config-reloader/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.140794 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_81478787-2999-4b71-94f6-b4e1c2618f2a/config-reloader/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.165918 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_81478787-2999-4b71-94f6-b4e1c2618f2a/alertmanager/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.345705 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_3d638ae6-520f-40ce-af64-abedb51668a6/aodh-api/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.409370 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_3d638ae6-520f-40ce-af64-abedb51668a6/aodh-evaluator/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.419937 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_3d638ae6-520f-40ce-af64-abedb51668a6/aodh-listener/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.537637 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_3d638ae6-520f-40ce-af64-abedb51668a6/aodh-notifier/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.598730 
5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7545687684-8xwxg_ff07778f-8a03-4601-8581-e66658b53274/barbican-api/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.619601 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7545687684-8xwxg_ff07778f-8a03-4601-8581-e66658b53274/barbican-api-log/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.767251 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6d77f6958-2br52_d6098b4d-083b-4c62-942d-e5fc84af0084/barbican-keystone-listener/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.831007 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6d77f6958-2br52_d6098b4d-083b-4c62-942d-e5fc84af0084/barbican-keystone-listener-log/0.log" Nov 26 18:22:31 crc kubenswrapper[5010]: I1126 18:22:31.986806 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-68c7b56cb5-899x5_9c0404c5-4d78-4319-819d-97858c02ef0e/barbican-worker/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.010369 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-68c7b56cb5-899x5_9c0404c5-4d78-4319-819d-97858c02ef0e/barbican-worker-log/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.154478 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-openstack-openstack-cell1-j2qxs_96fc8d0a-7889-436f-95bd-2e6d59921db3/bootstrap-openstack-openstack-cell1/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.276670 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7045bdf2-1e74-43b5-9568-895046c3b8b2/ceilometer-central-agent/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.332558 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7045bdf2-1e74-43b5-9568-895046c3b8b2/ceilometer-notification-agent/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.377025 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7045bdf2-1e74-43b5-9568-895046c3b8b2/proxy-httpd/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.457550 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_7045bdf2-1e74-43b5-9568-895046c3b8b2/sg-core/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.594757 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_3308f215-a7a3-4810-bc0e-a6556edadf05/cinder-api/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.645143 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_3308f215-a7a3-4810-bc0e-a6556edadf05/cinder-api-log/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.801547 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_73c74793-8ce0-4b8c-92f9-5e01c0462723/cinder-scheduler/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.889162 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_73c74793-8ce0-4b8c-92f9-5e01c0462723/probe/0.log" Nov 26 18:22:32 crc kubenswrapper[5010]: I1126 18:22:32.969816 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_configure-network-openstack-openstack-cell1-9w5cw_e4c8ec39-07f8-45d9-b135-175e573d1530/configure-network-openstack-openstack-cell1/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.117855 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-openstack-openstack-cell1-fxj9x_d007a94e-fb49-436e-b5ca-ae0c5e791540/configure-os-openstack-openstack-cell1/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.204170 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7947bf78cc-xwb6q_422202d4-c238-4769-9039-1cbbe92950c5/init/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.402264 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7947bf78cc-xwb6q_422202d4-c238-4769-9039-1cbbe92950c5/dnsmasq-dns/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.419918 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-7947bf78cc-xwb6q_422202d4-c238-4769-9039-1cbbe92950c5/init/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.425582 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-openstack-openstack-cell1-8t624_b09df012-4d2d-418a-8b1b-79247ce409f3/download-cache-openstack-openstack-cell1/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.620915 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_9d6d21b5-fc5a-45ba-a975-f5bc02271e5f/glance-httpd/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.642907 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_9d6d21b5-fc5a-45ba-a975-f5bc02271e5f/glance-log/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.842132 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_c8aacff2-d50d-4892-980a-6d708f73e1e4/glance-log/0.log" Nov 26 18:22:33 crc kubenswrapper[5010]: I1126 18:22:33.867838 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_c8aacff2-d50d-4892-980a-6d708f73e1e4/glance-httpd/0.log" Nov 26 18:22:34 crc kubenswrapper[5010]: I1126 18:22:34.370650 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-engine-75d4958f6-ntkst_ca0aa660-11d7-4ab4-8edf-cead47f8c396/heat-engine/0.log" Nov 26 18:22:34 crc kubenswrapper[5010]: I1126 18:22:34.436908 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-9d4c7768f-xlvpp_3503617f-ad5f-4f7a-b67f-03d8cc42e360/heat-api/0.log" Nov 26 18:22:34 crc kubenswrapper[5010]: I1126 18:22:34.600075 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-98fd67cb-2wpwn_a14ed063-d477-4b0d-8d6b-064deba25b74/heat-cfnapi/0.log" Nov 26 18:22:34 crc kubenswrapper[5010]: I1126 18:22:34.661155 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5f65b9f7c4-6mgjh_d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b/horizon/0.log" Nov 26 18:22:34 crc kubenswrapper[5010]: I1126 18:22:34.667186 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-openstack-openstack-cell1-jcr48_ed349151-ee95-4152-bff2-a9607e724140/install-certs-openstack-openstack-cell1/0.log" Nov 26 18:22:34 crc kubenswrapper[5010]: I1126 18:22:34.985462 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_install-os-openstack-openstack-cell1-dgxbz_16688d62-61dc-4d17-9540-35697c945721/install-os-openstack-openstack-cell1/0.log" Nov 26 18:22:35 crc kubenswrapper[5010]: I1126 18:22:35.234093 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-5f65b9f7c4-6mgjh_d301e4c9-fe99-4ba6-a0d3-7ba3cfecab8b/horizon-log/0.log" Nov 26 18:22:35 crc kubenswrapper[5010]: I1126 18:22:35.327870 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-855b4c8bc9-8m6lg_f9c273a9-97a8-4386-9f3c-ceca459cc42e/keystone-api/0.log" Nov 26 18:22:35 crc kubenswrapper[5010]: I1126 18:22:35.361731 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29402941-mssk5_338acf82-ff16-48d1-9cf3-ffbde62f81e6/keystone-cron/0.log" Nov 26 18:22:35 crc kubenswrapper[5010]: I1126 18:22:35.461889 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29403001-9675b_7944fe1e-8e94-4f90-b1de-984ae9b16948/keystone-cron/0.log" Nov 26 18:22:35 crc kubenswrapper[5010]: I1126 18:22:35.558699 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_88053629-842a-4282-b167-0a985ca95b54/kube-state-metrics/0.log" Nov 26 18:22:35 crc kubenswrapper[5010]: I1126 18:22:35.626212 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-openstack-openstack-cell1-gf4kg_42336123-89c2-4bfd-8772-ce5dca1dd4a5/libvirt-openstack-openstack-cell1/0.log" Nov 26 18:22:36 crc kubenswrapper[5010]: I1126 18:22:36.809547 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-86bf4f9bd7-vb726_6cc94ff2-fe26-443d-bdbc-c376d3aa59ba/neutron-httpd/0.log" Nov 26 18:22:36 crc kubenswrapper[5010]: I1126 18:22:36.860881 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-86bf4f9bd7-vb726_6cc94ff2-fe26-443d-bdbc-c376d3aa59ba/neutron-api/0.log" Nov 26 18:22:36 crc kubenswrapper[5010]: I1126 18:22:36.970404 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-dhcp-openstack-openstack-cell1-br7dh_990bc3e4-a901-447a-b15a-a2fd34d84290/neutron-dhcp-openstack-openstack-cell1/0.log" Nov 26 18:22:37 crc kubenswrapper[5010]: I1126 18:22:37.150868 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-openstack-openstack-cell1-djdb8_5d7160f7-44f5-4094-a05e-692f659806bc/neutron-metadata-openstack-openstack-cell1/0.log" Nov 26 18:22:37 crc kubenswrapper[5010]: I1126 18:22:37.277011 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-sriov-openstack-openstack-cell1-qcxvr_0f4095f7-6cba-45da-b62e-8e39587d45b0/neutron-sriov-openstack-openstack-cell1/0.log" Nov 26 18:22:37 crc kubenswrapper[5010]: I1126 18:22:37.575036 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_bc980ee9-e3a1-4293-9030-4bd470e8d0f9/nova-api-log/0.log" Nov 26 18:22:37 crc kubenswrapper[5010]: I1126 18:22:37.606317 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_bc980ee9-e3a1-4293-9030-4bd470e8d0f9/nova-api-api/0.log" Nov 26 18:22:37 crc kubenswrapper[5010]: I1126 18:22:37.641748 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_b7fdf798-30ad-49bc-9c7a-7684b52e34bf/nova-cell0-conductor-conductor/0.log" Nov 26 18:22:37 crc kubenswrapper[5010]: I1126 18:22:37.888911 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-conductor-0_5c175dd6-451e-4a91-8aea-f46e31c375a6/nova-cell1-conductor-conductor/0.log" Nov 26 18:22:38 crc kubenswrapper[5010]: I1126 18:22:38.857676 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell67ckb_ba894d64-c3e3-4595-a376-bfdc8429afca/nova-cell1-openstack-nova-compute-ffu-cell1-openstack-cell1/0.log" Nov 26 18:22:38 crc kubenswrapper[5010]: I1126 18:22:38.875666 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_43b78155-fd04-4435-a32a-21cc639a234a/nova-cell1-novncproxy-novncproxy/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.107306 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-openstack-openstack-cell1-6gf92_803b0121-2a6a-4ee8-b835-397db3b6bd43/nova-cell1-openstack-openstack-cell1/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.243347 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_cbe636bf-8dbb-47f5-9af6-50601035a730/nova-metadata-log/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.556508 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_e3f23011-c51f-4c71-b83a-fd35b10153e4/nova-scheduler-scheduler/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.608444 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-86dcb45b4b-cptcz_0cdd6169-a519-4a15-810a-b774180a35bb/init/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.762185 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_cbe636bf-8dbb-47f5-9af6-50601035a730/nova-metadata-metadata/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.908223 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-86dcb45b4b-cptcz_0cdd6169-a519-4a15-810a-b774180a35bb/init/0.log" Nov 26 18:22:39 crc kubenswrapper[5010]: I1126 18:22:39.936134 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-86dcb45b4b-cptcz_0cdd6169-a519-4a15-810a-b774180a35bb/octavia-api-provider-agent/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.147020 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-rs97n_d7332edc-62f6-4f6f-a6b5-8024a073631e/init/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.180563 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-api-86dcb45b4b-cptcz_0cdd6169-a519-4a15-810a-b774180a35bb/octavia-api/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.357161 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-hr695_4d881ca5-eedc-4457-a85e-252ebb895dc3/init/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.446314 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-rs97n_d7332edc-62f6-4f6f-a6b5-8024a073631e/init/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.476328 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-healthmanager-rs97n_d7332edc-62f6-4f6f-a6b5-8024a073631e/octavia-healthmanager/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.706887 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_octavia-housekeeping-hr695_4d881ca5-eedc-4457-a85e-252ebb895dc3/init/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.788242 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-housekeeping-hr695_4d881ca5-eedc-4457-a85e-252ebb895dc3/octavia-housekeeping/0.log" Nov 26 18:22:40 crc kubenswrapper[5010]: I1126 18:22:40.810673 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-5955f5554b-22m5x_c2804229-e969-49e6-806a-d132e8338b87/init/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.017931 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-qwndp_04ed61fb-8390-4ee1-a052-332b2bfdb369/init/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.034210 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-5955f5554b-22m5x_c2804229-e969-49e6-806a-d132e8338b87/octavia-amphora-httpd/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.046938 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-image-upload-5955f5554b-22m5x_c2804229-e969-49e6-806a-d132e8338b87/init/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.255629 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-qwndp_04ed61fb-8390-4ee1-a052-332b2bfdb369/octavia-rsyslog/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.264925 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-rsyslog-qwndp_04ed61fb-8390-4ee1-a052-332b2bfdb369/init/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.395313 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-92kdl_dc7aa8a9-668c-485c-ad2b-6ba848d528b7/init/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.633310 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-92kdl_dc7aa8a9-668c-485c-ad2b-6ba848d528b7/init/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.689441 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_536595b1-5ba9-4588-8e64-32480adb79ea/mysql-bootstrap/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.779614 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_octavia-worker-92kdl_dc7aa8a9-668c-485c-ad2b-6ba848d528b7/octavia-worker/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.891479 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:22:41 crc kubenswrapper[5010]: E1126 18:22:41.892267 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.930039 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_536595b1-5ba9-4588-8e64-32480adb79ea/galera/0.log" Nov 26 18:22:41 crc kubenswrapper[5010]: I1126 18:22:41.972370 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-cell1-galera-0_536595b1-5ba9-4588-8e64-32480adb79ea/mysql-bootstrap/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.004664 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_4456ea0d-01da-4a0a-b918-db686f0e23aa/mysql-bootstrap/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.210323 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_4456ea0d-01da-4a0a-b918-db686f0e23aa/mysql-bootstrap/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.524303 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_28b038c4-c8f4-4e86-835b-7225647d8e9a/openstackclient/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.544371 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_4456ea0d-01da-4a0a-b918-db686f0e23aa/galera/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.792684 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-49g6w_2bce34e3-639b-4cbb-97bb-5edc1650ad69/openstack-network-exporter/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.794240 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pbsm5_dfa9a474-ac55-432d-9f63-9b6d4daa9af5/ovsdb-server-init/0.log" Nov 26 18:22:42 crc kubenswrapper[5010]: I1126 18:22:42.974463 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pbsm5_dfa9a474-ac55-432d-9f63-9b6d4daa9af5/ovsdb-server-init/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.037483 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pbsm5_dfa9a474-ac55-432d-9f63-9b6d4daa9af5/ovsdb-server/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.042495 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-pbsm5_dfa9a474-ac55-432d-9f63-9b6d4daa9af5/ovs-vswitchd/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.190291 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-w9882_30cb9d89-279d-4bb8-bd1b-81e1dd58368a/ovn-controller/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.305111 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_75cbf5ad-ffb6-4a24-abe6-1b495c404f08/openstack-network-exporter/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.308032 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_75cbf5ad-ffb6-4a24-abe6-1b495c404f08/ovn-northd/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.515371 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8ffb280d-8fa7-48c5-9407-42a21ac5b021/openstack-network-exporter/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.594073 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-openstack-openstack-cell1-slxr9_e8cd0354-9014-4ae0-b985-f3c0e9e4d456/ovn-openstack-openstack-cell1/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.699316 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8ffb280d-8fa7-48c5-9407-42a21ac5b021/ovsdbserver-nb/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.799011 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-nb-1_891a879a-7cb6-44cc-ac0a-05656b5a0ed0/openstack-network-exporter/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.800422 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-1_891a879a-7cb6-44cc-ac0a-05656b5a0ed0/ovsdbserver-nb/0.log" Nov 26 18:22:43 crc kubenswrapper[5010]: I1126 18:22:43.998123 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_0c1b878d-4a1d-4d47-b4cf-b366607c8631/openstack-network-exporter/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.003343 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-2_0c1b878d-4a1d-4d47-b4cf-b366607c8631/ovsdbserver-nb/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.161002 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b1f50f96-b58f-4e55-ae75-3324dd5cdc76/openstack-network-exporter/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.198315 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_b1f50f96-b58f-4e55-ae75-3324dd5cdc76/ovsdbserver-sb/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.333062 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_51e8e82f-9164-482c-a05e-556960a05d88/openstack-network-exporter/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.378251 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-1_51e8e82f-9164-482c-a05e-556960a05d88/ovsdbserver-sb/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.536484 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_7ecc2ae9-7598-47f5-a481-967ef5353ff4/ovsdbserver-sb/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.563109 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-2_7ecc2ae9-7598-47f5-a481-967ef5353ff4/openstack-network-exporter/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.762621 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-f6db4d686-lqclr_5412afb1-3aa4-4a56-8078-23e8c783f3ea/placement-log/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.799938 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-f6db4d686-lqclr_5412afb1-3aa4-4a56-8078-23e8c783f3ea/placement-api/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.925445 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_pre-adoption-validation-openstack-pre-adoption-openstack-c5zk5l_a38ccb1f-698d-4464-986d-6b2d5ac67beb/pre-adoption-validation-openstack-pre-adoption-openstack-cell1/0.log" Nov 26 18:22:44 crc kubenswrapper[5010]: I1126 18:22:44.982202 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_1c74ec58-98b4-4a24-995d-a4c6c15376a9/init-config-reloader/0.log" Nov 26 18:22:45 crc kubenswrapper[5010]: I1126 18:22:45.243646 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_1c74ec58-98b4-4a24-995d-a4c6c15376a9/init-config-reloader/0.log" Nov 26 18:22:45 crc kubenswrapper[5010]: I1126 18:22:45.258573 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_1c74ec58-98b4-4a24-995d-a4c6c15376a9/config-reloader/0.log" Nov 26 18:22:45 crc 
kubenswrapper[5010]: I1126 18:22:45.291007 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_1c74ec58-98b4-4a24-995d-a4c6c15376a9/thanos-sidecar/0.log" Nov 26 18:22:45 crc kubenswrapper[5010]: I1126 18:22:45.315670 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_1c74ec58-98b4-4a24-995d-a4c6c15376a9/prometheus/0.log" Nov 26 18:22:45 crc kubenswrapper[5010]: I1126 18:22:45.472042 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_900e098f-8106-435a-964a-a4e3755308fc/setup-container/0.log" Nov 26 18:22:46 crc kubenswrapper[5010]: I1126 18:22:46.471292 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa44ef1f-4c07-4afd-97c6-9e0075ad6f71/setup-container/0.log" Nov 26 18:22:46 crc kubenswrapper[5010]: I1126 18:22:46.471325 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_900e098f-8106-435a-964a-a4e3755308fc/setup-container/0.log" Nov 26 18:22:46 crc kubenswrapper[5010]: I1126 18:22:46.573356 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_900e098f-8106-435a-964a-a4e3755308fc/rabbitmq/0.log" Nov 26 18:22:46 crc kubenswrapper[5010]: I1126 18:22:46.798700 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-openstack-openstack-cell1-9qjx4_3fc3e158-6b98-4a72-85b5-a50aad4fe33e/reboot-os-openstack-openstack-cell1/0.log" Nov 26 18:22:46 crc kubenswrapper[5010]: I1126 18:22:46.828533 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa44ef1f-4c07-4afd-97c6-9e0075ad6f71/setup-container/0.log" Nov 26 18:22:46 crc kubenswrapper[5010]: I1126 18:22:46.983417 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_aa44ef1f-4c07-4afd-97c6-9e0075ad6f71/rabbitmq/0.log" Nov 26 18:22:47 crc kubenswrapper[5010]: I1126 18:22:47.042663 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-openstack-openstack-cell1-dzfdn_78e89a72-3a79-4431-aa18-ea2e358242ec/run-os-openstack-openstack-cell1/0.log" Nov 26 18:22:47 crc kubenswrapper[5010]: I1126 18:22:47.248996 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-openstack-j7psw_2541fd34-09c5-44d6-aad0-f308b87d63aa/ssh-known-hosts-openstack/0.log" Nov 26 18:22:47 crc kubenswrapper[5010]: I1126 18:22:47.438155 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6d5d78b986-llp52_c29d3e67-7707-42ab-b03f-d2240fef0672/proxy-server/0.log" Nov 26 18:22:47 crc kubenswrapper[5010]: I1126 18:22:47.549861 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6d5d78b986-llp52_c29d3e67-7707-42ab-b03f-d2240fef0672/proxy-httpd/0.log" Nov 26 18:22:48 crc kubenswrapper[5010]: I1126 18:22:48.251500 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-brrhc_a3b12744-4bf5-44b6-9584-2e2edf84b267/swift-ring-rebalance/0.log" Nov 26 18:22:48 crc kubenswrapper[5010]: I1126 18:22:48.319526 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-openstack-openstack-cell1-5t8x4_8f5656c3-ac2f-4666-89a8-70a09fee6e15/telemetry-openstack-openstack-cell1/0.log" Nov 26 18:22:48 crc kubenswrapper[5010]: I1126 18:22:48.484876 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_tripleo-cleanup-tripleo-cleanup-openstack-cell1-8k9dm_484ffc98-a27e-4fc3-9fb9-70c960bd0699/tripleo-cleanup-tripleo-cleanup-openstack-cell1/0.log" Nov 26 18:22:48 crc kubenswrapper[5010]: I1126 18:22:48.570970 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-openstack-openstack-cell1-rrchd_0decd137-74df-4dea-81f5-4b5431d96871/validate-network-openstack-openstack-cell1/0.log" Nov 26 18:22:49 crc kubenswrapper[5010]: I1126 18:22:49.845044 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_72e344f5-2a4d-47df-9ae0-59758d16ba41/memcached/0.log" Nov 26 18:22:54 crc kubenswrapper[5010]: I1126 18:22:54.891115 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:22:54 crc kubenswrapper[5010]: E1126 18:22:54.891789 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:23:07 crc kubenswrapper[5010]: I1126 18:23:07.892381 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:23:07 crc kubenswrapper[5010]: E1126 18:23:07.893235 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.072102 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/util/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.232836 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/util/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.235167 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/pull/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.255247 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/pull/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.455173 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/extract/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.479613 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/pull/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.532411 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_3711d63ee32771f989196ada809b479d54ff48bb9ee48b91ea976dcc5apb4d5_6d3c5be9-3acf-4cf9-bfda-54ab8d80f3ae/util/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.691422 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-c89k7_9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f/kube-rbac-proxy/0.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.769391 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-c89k7_9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f/manager/1.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.788741 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7b64f4fb85-c89k7_9e0c2ada-ac2c-4fc8-b786-2a62f0458c2f/manager/2.log" Nov 26 18:23:17 crc kubenswrapper[5010]: I1126 18:23:17.858615 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-sbppr_6a970d68-d885-4fc2-9d58-508537a42572/kube-rbac-proxy/0.log" Nov 26 18:23:18 crc kubenswrapper[5010]: I1126 18:23:18.000419 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-sbppr_6a970d68-d885-4fc2-9d58-508537a42572/manager/2.log" Nov 26 18:23:18 crc kubenswrapper[5010]: I1126 18:23:18.027369 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6b7f75547b-sbppr_6a970d68-d885-4fc2-9d58-508537a42572/manager/1.log" Nov 26 18:23:18 crc kubenswrapper[5010]: I1126 18:23:18.041517 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-qmr28_a4bbf592-007c-4176-a6a3-0209b33b6048/kube-rbac-proxy/0.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.052979 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-qmr28_a4bbf592-007c-4176-a6a3-0209b33b6048/manager/2.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.062522 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-4w8ql_b6c13a13-621b-45cb-9830-4dfaf15ee06b/kube-rbac-proxy/0.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.086958 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-955677c94-qmr28_a4bbf592-007c-4176-a6a3-0209b33b6048/manager/1.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.287031 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-p5446_7ec0a644-00e0-4b67-b2ad-7a7128dcaf19/kube-rbac-proxy/0.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.288415 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-4w8ql_b6c13a13-621b-45cb-9830-4dfaf15ee06b/manager/1.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.304900 5010 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-589cbd6b5b-4w8ql_b6c13a13-621b-45cb-9830-4dfaf15ee06b/manager/2.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.500959 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-p5446_7ec0a644-00e0-4b67-b2ad-7a7128dcaf19/manager/2.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.504103 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-mc96z_8b2b09a7-2b17-43da-ae0e-4448b96eed50/kube-rbac-proxy/0.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.534164 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5b77f656f-p5446_7ec0a644-00e0-4b67-b2ad-7a7128dcaf19/manager/1.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.664121 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-mc96z_8b2b09a7-2b17-43da-ae0e-4448b96eed50/manager/1.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.676433 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d494799bf-mc96z_8b2b09a7-2b17-43da-ae0e-4448b96eed50/manager/2.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.736868 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-sxdct_93625d2a-6f36-43a8-b26c-8f6506955b15/kube-rbac-proxy/0.log" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.905930 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:23:19 crc kubenswrapper[5010]: E1126 18:23:19.906278 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:23:19 crc kubenswrapper[5010]: I1126 18:23:19.907873 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-sxdct_93625d2a-6f36-43a8-b26c-8f6506955b15/manager/2.log" Nov 26 18:23:20 crc kubenswrapper[5010]: I1126 18:23:20.003061 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dhngn_ec8d3bdf-fc89-426b-82e9-a1ae81a3e548/kube-rbac-proxy/0.log" Nov 26 18:23:20 crc kubenswrapper[5010]: I1126 18:23:20.089770 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-sxdct_93625d2a-6f36-43a8-b26c-8f6506955b15/manager/3.log" Nov 26 18:23:20 crc kubenswrapper[5010]: I1126 18:23:20.137751 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dhngn_ec8d3bdf-fc89-426b-82e9-a1ae81a3e548/manager/3.log" Nov 26 18:23:20 crc kubenswrapper[5010]: I1126 18:23:20.143034 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-67cb4dc6d4-dhngn_ec8d3bdf-fc89-426b-82e9-a1ae81a3e548/manager/2.log" Nov 26 18:23:20 crc kubenswrapper[5010]: I1126 18:23:20.926655 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-9lx7h_ce1fedbc-31da-4c37-9731-34e79ab604f4/manager/1.log" Nov 26 18:23:20 crc kubenswrapper[5010]: I1126 18:23:20.990354 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-9lx7h_ce1fedbc-31da-4c37-9731-34e79ab604f4/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.082970 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7b4567c7cf-9lx7h_ce1fedbc-31da-4c37-9731-34e79ab604f4/manager/2.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.115511 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-k7vx2_7e5769c2-7f83-41ff-9365-7f5792e8d81b/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.145055 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-k7vx2_7e5769c2-7f83-41ff-9365-7f5792e8d81b/manager/2.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.214989 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-5d499bf58b-k7vx2_7e5769c2-7f83-41ff-9365-7f5792e8d81b/manager/1.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.282458 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-sj6tg_dfb4a15b-a139-4778-acc7-f236e947ca96/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.317383 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-sj6tg_dfb4a15b-a139-4778-acc7-f236e947ca96/manager/2.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.346524 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-66f4dd4bc7-sj6tg_dfb4a15b-a139-4778-acc7-f236e947ca96/manager/1.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.395884 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-f64fd_191eef94-8fdf-4180-8ce0-1d62fc3f0de0/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.475152 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-f64fd_191eef94-8fdf-4180-8ce0-1d62fc3f0de0/manager/2.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.533345 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-6fdcddb789-f64fd_191eef94-8fdf-4180-8ce0-1d62fc3f0de0/manager/1.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.593578 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5llrj_05194bfa-88c3-4826-8a59-6d62252e4b1a/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.665611 5010 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5llrj_05194bfa-88c3-4826-8a59-6d62252e4b1a/manager/1.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.709495 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-79556f57fc-5llrj_05194bfa-88c3-4826-8a59-6d62252e4b1a/manager/2.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.777129 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-fx8tr_b4799b0e-11ed-4331-84d1-daf581d00bbe/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.848558 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-fx8tr_b4799b0e-11ed-4331-84d1-daf581d00bbe/manager/3.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.885139 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-fx8tr_b4799b0e-11ed-4331-84d1-daf581d00bbe/manager/2.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.929846 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-674cb676c8nb4vx_3daf5f1d-5d15-4b93-ac0b-8209060a0557/kube-rbac-proxy/0.log" Nov 26 18:23:21 crc kubenswrapper[5010]: I1126 18:23:21.991810 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-674cb676c8nb4vx_3daf5f1d-5d15-4b93-ac0b-8209060a0557/manager/1.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.067638 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-674cb676c8nb4vx_3daf5f1d-5d15-4b93-ac0b-8209060a0557/manager/0.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.227290 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-659d75f7c6-lwbrh_1b523418-d938-4ba7-8788-b93b382429e3/manager/2.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.328247 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-544fb75865-bd9lh_a3bc645d-4358-47cb-9e3b-ebc975c69092/operator/1.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.595755 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-gcj9h_b0d7107e-a617-4a7b-a6e3-0267996965ef/kube-rbac-proxy/0.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.782442 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-gcj9h_b0d7107e-a617-4a7b-a6e3-0267996965ef/manager/1.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.814529 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-544fb75865-bd9lh_a3bc645d-4358-47cb-9e3b-ebc975c69092/operator/0.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.831642 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-56897c768d-gcj9h_b0d7107e-a617-4a7b-a6e3-0267996965ef/manager/2.log" Nov 26 18:23:22 crc kubenswrapper[5010]: I1126 18:23:22.954576 5010 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-56fnh_5a87f5af-beea-4084-8351-4d333378baf8/registry-server/0.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.025226 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-zq8vc_bf155072-f786-47eb-9455-f807444d12e9/manager/2.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.048227 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-zq8vc_bf155072-f786-47eb-9455-f807444d12e9/kube-rbac-proxy/0.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.160299 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-57988cc5b5-zq8vc_bf155072-f786-47eb-9455-f807444d12e9/manager/1.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.289666 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-h9gnm_cdfa6310-b994-49ba-8e89-dc6584a65314/operator/3.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.305938 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-h9gnm_cdfa6310-b994-49ba-8e89-dc6584a65314/operator/2.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.423825 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-nfl24_82a45cae-9275-4f6a-8807-1ed1c97da89e/kube-rbac-proxy/0.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.567339 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-nfl24_82a45cae-9275-4f6a-8807-1ed1c97da89e/manager/1.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.583307 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-d77b94747-nfl24_82a45cae-9275-4f6a-8807-1ed1c97da89e/manager/2.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.651172 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-zrldc_01236c17-da54-4428-9e82-9a3b0165d6fc/kube-rbac-proxy/0.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.835121 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-zrldc_01236c17-da54-4428-9e82-9a3b0165d6fc/manager/1.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.880550 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-xmltd_1ff0a07f-935b-493a-a18a-a449232dc185/kube-rbac-proxy/0.log" Nov 26 18:23:23 crc kubenswrapper[5010]: I1126 18:23:23.992759 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-zrldc_01236c17-da54-4428-9e82-9a3b0165d6fc/manager/2.log" Nov 26 18:23:24 crc kubenswrapper[5010]: I1126 18:23:24.028919 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-659d75f7c6-lwbrh_1b523418-d938-4ba7-8788-b93b382429e3/manager/3.log" Nov 26 18:23:24 crc kubenswrapper[5010]: I1126 18:23:24.044850 5010 
log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-xmltd_1ff0a07f-935b-493a-a18a-a449232dc185/manager/0.log" Nov 26 18:23:24 crc kubenswrapper[5010]: I1126 18:23:24.089745 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5cd6c7f4c8-xmltd_1ff0a07f-935b-493a-a18a-a449232dc185/manager/1.log" Nov 26 18:23:24 crc kubenswrapper[5010]: I1126 18:23:24.159780 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bdtsk_522c2ed1-a470-4885-88fc-395ed7834b23/kube-rbac-proxy/0.log" Nov 26 18:23:24 crc kubenswrapper[5010]: I1126 18:23:24.190993 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bdtsk_522c2ed1-a470-4885-88fc-395ed7834b23/manager/2.log" Nov 26 18:23:24 crc kubenswrapper[5010]: I1126 18:23:24.234173 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-bdtsk_522c2ed1-a470-4885-88fc-395ed7834b23/manager/1.log" Nov 26 18:23:30 crc kubenswrapper[5010]: I1126 18:23:30.892095 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:23:30 crc kubenswrapper[5010]: E1126 18:23:30.892998 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:23:44 crc kubenswrapper[5010]: I1126 18:23:44.892694 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:23:44 crc kubenswrapper[5010]: E1126 18:23:44.893392 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:23:45 crc kubenswrapper[5010]: I1126 18:23:45.010367 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-sxvhn_1383de27-90fb-498e-8e3e-b622760bfb96/control-plane-machine-set-operator/0.log" Nov 26 18:23:45 crc kubenswrapper[5010]: I1126 18:23:45.087813 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-f6hqp_80e56b90-699c-4fcd-b69a-748b192fce11/kube-rbac-proxy/0.log" Nov 26 18:23:45 crc kubenswrapper[5010]: I1126 18:23:45.156324 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-f6hqp_80e56b90-699c-4fcd-b69a-748b192fce11/machine-api-operator/0.log" Nov 26 18:23:59 crc kubenswrapper[5010]: I1126 18:23:59.906466 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:23:59 crc kubenswrapper[5010]: E1126 
18:23:59.909522 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:24:01 crc kubenswrapper[5010]: I1126 18:24:01.319830 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-6pqsn_ad59753d-a191-4ef5-9945-d1126e81bb8e/cert-manager-controller/1.log" Nov 26 18:24:01 crc kubenswrapper[5010]: I1126 18:24:01.392385 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-6pqsn_ad59753d-a191-4ef5-9945-d1126e81bb8e/cert-manager-controller/0.log" Nov 26 18:24:01 crc kubenswrapper[5010]: I1126 18:24:01.565443 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-rkkql_35029056-31b7-46c8-9ac0-93d2c36ae95f/cert-manager-cainjector/0.log" Nov 26 18:24:01 crc kubenswrapper[5010]: I1126 18:24:01.658833 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-r7qmc_53dc96f8-9b73-42e8-ada1-7bf243575c6b/cert-manager-webhook/0.log" Nov 26 18:24:14 crc kubenswrapper[5010]: I1126 18:24:14.891101 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:24:14 crc kubenswrapper[5010]: E1126 18:24:14.891959 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:24:18 crc kubenswrapper[5010]: I1126 18:24:18.559383 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-4cbrf_5dfb6640-deaa-4758-ac2a-bd2cc1db4508/nmstate-console-plugin/0.log" Nov 26 18:24:18 crc kubenswrapper[5010]: I1126 18:24:18.701766 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-cf7fq_cd932913-4bd6-409f-bc77-688af8d29524/nmstate-handler/0.log" Nov 26 18:24:18 crc kubenswrapper[5010]: I1126 18:24:18.759745 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-8pmmg_79ffe6fa-990a-422f-ba69-151aacb5592b/kube-rbac-proxy/0.log" Nov 26 18:24:18 crc kubenswrapper[5010]: I1126 18:24:18.774187 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-8pmmg_79ffe6fa-990a-422f-ba69-151aacb5592b/nmstate-metrics/0.log" Nov 26 18:24:18 crc kubenswrapper[5010]: I1126 18:24:18.983835 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-2whk4_f9552f8b-91b2-41aa-a1f4-2239ee49085d/nmstate-operator/0.log" Nov 26 18:24:19 crc kubenswrapper[5010]: I1126 18:24:19.027432 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-qgmx6_c21d70d9-5cf1-46c9-95af-510e964cfff9/nmstate-webhook/0.log" Nov 26 18:24:25 crc 
kubenswrapper[5010]: I1126 18:24:25.892230 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:24:25 crc kubenswrapper[5010]: E1126 18:24:25.893799 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:24:36 crc kubenswrapper[5010]: I1126 18:24:36.391444 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-vjlsw_6a24f7b6-d06d-4f11-a632-d997d92a5c5b/kube-rbac-proxy/0.log" Nov 26 18:24:36 crc kubenswrapper[5010]: I1126 18:24:36.600826 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-frr-files/0.log" Nov 26 18:24:36 crc kubenswrapper[5010]: I1126 18:24:36.826628 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-vjlsw_6a24f7b6-d06d-4f11-a632-d997d92a5c5b/controller/0.log" Nov 26 18:24:36 crc kubenswrapper[5010]: I1126 18:24:36.859546 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-frr-files/0.log" Nov 26 18:24:36 crc kubenswrapper[5010]: I1126 18:24:36.859587 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-reloader/0.log" Nov 26 18:24:36 crc kubenswrapper[5010]: I1126 18:24:36.904377 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-metrics/0.log" Nov 26 18:24:37 crc kubenswrapper[5010]: I1126 18:24:37.027519 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-reloader/0.log" Nov 26 18:24:37 crc kubenswrapper[5010]: I1126 18:24:37.179188 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-metrics/0.log" Nov 26 18:24:37 crc kubenswrapper[5010]: I1126 18:24:37.209169 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-frr-files/0.log" Nov 26 18:24:37 crc kubenswrapper[5010]: I1126 18:24:37.215159 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-reloader/0.log" Nov 26 18:24:37 crc kubenswrapper[5010]: I1126 18:24:37.246123 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-metrics/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.302225 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-frr-files/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.330186 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-reloader/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.335703 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/controller/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.341690 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/cp-metrics/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.464742 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/frr-metrics/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.527733 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/kube-rbac-proxy/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.561264 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/kube-rbac-proxy-frr/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.704627 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/reloader/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.820049 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-q4w26_6003861e-afe0-4607-a3d4-05f646e2519a/frr-k8s-webhook-server/0.log" Nov 26 18:24:38 crc kubenswrapper[5010]: I1126 18:24:38.891449 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:24:38 crc kubenswrapper[5010]: E1126 18:24:38.891723 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:24:39 crc kubenswrapper[5010]: I1126 18:24:39.243218 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7757b8b846-drzn5_afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6/manager/3.log" Nov 26 18:24:39 crc kubenswrapper[5010]: I1126 18:24:39.293115 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-7757b8b846-drzn5_afe2abf1-27ec-4e60-b337-2aa8f2a1d3c6/manager/2.log" Nov 26 18:24:39 crc kubenswrapper[5010]: I1126 18:24:39.462880 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-597c8d6cb6-jk955_93eee346-e7d2-4097-896b-cc1ffa20d03b/webhook-server/0.log" Nov 26 18:24:40 crc kubenswrapper[5010]: I1126 18:24:40.158128 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-ddjx5_8d6e04bf-3113-4c08-b053-acdc47461280/kube-rbac-proxy/0.log" Nov 26 18:24:41 crc kubenswrapper[5010]: I1126 18:24:41.161384 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-ddjx5_8d6e04bf-3113-4c08-b053-acdc47461280/speaker/0.log" Nov 26 18:24:41 crc kubenswrapper[5010]: I1126 18:24:41.604768 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-pxvlg_abfe8693-75aa-4c43-8c6e-459b37a00cd0/frr/0.log" Nov 26 18:24:52 crc kubenswrapper[5010]: I1126 18:24:52.891519 5010 scope.go:117] 
"RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:24:52 crc kubenswrapper[5010]: E1126 18:24:52.892287 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.330835 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/util/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.534266 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/util/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.592456 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/pull/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.624663 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/pull/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.764179 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/util/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.784101 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/pull/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.832334 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931aktg42_5045e2fe-8fec-4331-885c-77b33cd99537/extract/0.log" Nov 26 18:24:55 crc kubenswrapper[5010]: I1126 18:24:55.946529 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/util/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.299395 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/pull/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.320849 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/util/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.326556 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/pull/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.558798 5010 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/pull/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.586203 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/extract/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.636593 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ejj9gp_6afdbf0c-4651-414d-9aca-9a74ec043b34/util/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.783490 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/util/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.898268 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/pull/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.916018 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/util/0.log" Nov 26 18:24:56 crc kubenswrapper[5010]: I1126 18:24:56.936497 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/pull/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.100350 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/extract/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.102522 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/pull/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.102854 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210knhbh_0b5519ad-45e2-4fef-b960-6090a4d87d70/util/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.295772 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/extract-utilities/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.458717 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/extract-utilities/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.501424 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/extract-content/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.518445 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/extract-content/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.671537 
5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/extract-utilities/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.707486 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/extract-content/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.868039 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-vz65b_c5a07d99-8c81-4356-86f0-b46c64547843/registry-server/0.log" Nov 26 18:24:57 crc kubenswrapper[5010]: I1126 18:24:57.911104 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/extract-utilities/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.061605 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/extract-utilities/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.087025 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/extract-content/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.093662 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/extract-content/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.317004 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/extract-utilities/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.344009 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/extract-content/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.563445 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/util/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.793977 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/pull/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.823405 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/util/0.log" Nov 26 18:24:58 crc kubenswrapper[5010]: I1126 18:24:58.888663 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/pull/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.037100 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-nzqw8_8eb4c74c-64c1-41b6-ae72-dd032b17bd3e/registry-server/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.075506 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/util/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.110436 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/extract/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.151930 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c69zj8s_38aef96a-1fea-4c2c-9d6f-4dac9bb7f712/pull/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.218608 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-z7kzh_7ade2b88-da36-4267-a6b2-f6917eaaca43/marketplace-operator/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.298980 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/extract-utilities/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.447068 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/extract-content/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.455601 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/extract-content/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.462883 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/extract-utilities/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.635045 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/extract-content/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.635094 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/extract-utilities/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.637846 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/extract-utilities/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.870259 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/extract-utilities/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.890999 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/extract-content/0.log" Nov 26 18:24:59 crc kubenswrapper[5010]: I1126 18:24:59.907149 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/extract-content/0.log" Nov 26 18:25:00 crc kubenswrapper[5010]: I1126 18:25:00.015001 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-g5kfv_55a73ed2-ad4b-4ebc-882c-7564f81058a5/registry-server/0.log" Nov 26 18:25:00 crc kubenswrapper[5010]: 
I1126 18:25:00.034860 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/extract-utilities/0.log" Nov 26 18:25:00 crc kubenswrapper[5010]: I1126 18:25:00.065294 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/extract-content/0.log" Nov 26 18:25:00 crc kubenswrapper[5010]: I1126 18:25:00.331807 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-zq576_96aa6836-3869-414a-8c82-73debe80e38a/registry-server/0.log" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.154985 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lxz"] Nov 26 18:25:05 crc kubenswrapper[5010]: E1126 18:25:05.155995 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1d6dbe5-8328-42cf-878e-ad70b3144799" containerName="container-00" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.156009 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1d6dbe5-8328-42cf-878e-ad70b3144799" containerName="container-00" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.156240 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1d6dbe5-8328-42cf-878e-ad70b3144799" containerName="container-00" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.157911 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.172108 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lxz"] Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.235564 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-catalog-content\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.235834 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xld9w\" (UniqueName: \"kubernetes.io/projected/84589028-90eb-41c1-8ddc-cb8020d980c1-kube-api-access-xld9w\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.235888 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-utilities\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.338307 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xld9w\" (UniqueName: \"kubernetes.io/projected/84589028-90eb-41c1-8ddc-cb8020d980c1-kube-api-access-xld9w\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.338396 5010 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-utilities\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.338487 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-catalog-content\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.338863 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-utilities\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.338925 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-catalog-content\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.355671 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-khbw9"] Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.357770 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.365068 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xld9w\" (UniqueName: \"kubernetes.io/projected/84589028-90eb-41c1-8ddc-cb8020d980c1-kube-api-access-xld9w\") pod \"redhat-marketplace-v8lxz\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.369219 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-khbw9"] Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.439935 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-98zhm\" (UniqueName: \"kubernetes.io/projected/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-kube-api-access-98zhm\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.440329 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-utilities\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.440462 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-catalog-content\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " 
pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.498315 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.542293 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-catalog-content\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.542415 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-98zhm\" (UniqueName: \"kubernetes.io/projected/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-kube-api-access-98zhm\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.542551 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-utilities\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.542852 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-catalog-content\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:05 crc kubenswrapper[5010]: I1126 18:25:05.542953 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-utilities\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:06 crc kubenswrapper[5010]: I1126 18:25:06.112479 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-98zhm\" (UniqueName: \"kubernetes.io/projected/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-kube-api-access-98zhm\") pod \"community-operators-khbw9\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:06 crc kubenswrapper[5010]: I1126 18:25:06.346798 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:06 crc kubenswrapper[5010]: I1126 18:25:06.844797 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lxz"] Nov 26 18:25:06 crc kubenswrapper[5010]: I1126 18:25:06.891786 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:25:06 crc kubenswrapper[5010]: E1126 18:25:06.892117 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:25:06 crc kubenswrapper[5010]: I1126 18:25:06.925958 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-khbw9"] Nov 26 18:25:06 crc kubenswrapper[5010]: W1126 18:25:06.933155 5010 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda5bd4a80_5e4a_4db3_be24_81beb87b99c7.slice/crio-4b327d386520c059411b47d685a1753208632142c04c50b9d46206d14d39c605 WatchSource:0}: Error finding container 4b327d386520c059411b47d685a1753208632142c04c50b9d46206d14d39c605: Status 404 returned error can't find the container with id 4b327d386520c059411b47d685a1753208632142c04c50b9d46206d14d39c605 Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.412352 5010 generic.go:334] "Generic (PLEG): container finished" podID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerID="b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624" exitCode=0 Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.412573 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lxz" event={"ID":"84589028-90eb-41c1-8ddc-cb8020d980c1","Type":"ContainerDied","Data":"b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624"} Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.412847 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lxz" event={"ID":"84589028-90eb-41c1-8ddc-cb8020d980c1","Type":"ContainerStarted","Data":"fb172d99165496c44b6ef55c1fcda7f4ede995b75531c184a06eff04e7f807e4"} Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.416408 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerID="34bb669b20b64f49c45b1d6151a6dc57a81d0197181e5542b1ad20cbde6e49dd" exitCode=0 Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.416466 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerDied","Data":"34bb669b20b64f49c45b1d6151a6dc57a81d0197181e5542b1ad20cbde6e49dd"} Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.416511 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerStarted","Data":"4b327d386520c059411b47d685a1753208632142c04c50b9d46206d14d39c605"} Nov 26 18:25:07 crc kubenswrapper[5010]: I1126 18:25:07.416757 5010 provider.go:102] Refreshing cache for provider: 
*credentialprovider.defaultDockerConfigProvider Nov 26 18:25:09 crc kubenswrapper[5010]: I1126 18:25:09.450009 5010 generic.go:334] "Generic (PLEG): container finished" podID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerID="99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066" exitCode=0 Nov 26 18:25:09 crc kubenswrapper[5010]: I1126 18:25:09.450625 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lxz" event={"ID":"84589028-90eb-41c1-8ddc-cb8020d980c1","Type":"ContainerDied","Data":"99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066"} Nov 26 18:25:09 crc kubenswrapper[5010]: I1126 18:25:09.453977 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerStarted","Data":"79fd4571a8f15923f4ddab9c3e4eb29207bae219e08726d99b4310877bd57785"} Nov 26 18:25:10 crc kubenswrapper[5010]: I1126 18:25:10.468083 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerID="79fd4571a8f15923f4ddab9c3e4eb29207bae219e08726d99b4310877bd57785" exitCode=0 Nov 26 18:25:10 crc kubenswrapper[5010]: I1126 18:25:10.468135 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerDied","Data":"79fd4571a8f15923f4ddab9c3e4eb29207bae219e08726d99b4310877bd57785"} Nov 26 18:25:10 crc kubenswrapper[5010]: I1126 18:25:10.472526 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lxz" event={"ID":"84589028-90eb-41c1-8ddc-cb8020d980c1","Type":"ContainerStarted","Data":"1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29"} Nov 26 18:25:10 crc kubenswrapper[5010]: I1126 18:25:10.516395 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v8lxz" podStartSLOduration=3.006187523 podStartE2EDuration="5.516372678s" podCreationTimestamp="2025-11-26 18:25:05 +0000 UTC" firstStartedPulling="2025-11-26 18:25:07.416013007 +0000 UTC m=+10728.206730175" lastFinishedPulling="2025-11-26 18:25:09.926198172 +0000 UTC m=+10730.716915330" observedRunningTime="2025-11-26 18:25:10.515770273 +0000 UTC m=+10731.306487441" watchObservedRunningTime="2025-11-26 18:25:10.516372678 +0000 UTC m=+10731.307089836" Nov 26 18:25:11 crc kubenswrapper[5010]: I1126 18:25:11.510603 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerStarted","Data":"24bf7551f3097da9f4478c2e03962813d99573604808d46c924515e71ede0776"} Nov 26 18:25:11 crc kubenswrapper[5010]: I1126 18:25:11.544263 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-khbw9" podStartSLOduration=2.827737398 podStartE2EDuration="6.544235039s" podCreationTimestamp="2025-11-26 18:25:05 +0000 UTC" firstStartedPulling="2025-11-26 18:25:07.418909599 +0000 UTC m=+10728.209626767" lastFinishedPulling="2025-11-26 18:25:11.13540722 +0000 UTC m=+10731.926124408" observedRunningTime="2025-11-26 18:25:11.530054517 +0000 UTC m=+10732.320771705" watchObservedRunningTime="2025-11-26 18:25:11.544235039 +0000 UTC m=+10732.334952227" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.108129 5010 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-operators_obo-prometheus-operator-668cf9dfbb-f8k45_f4f66357-4d7b-4f37-a905-c26b934dfcf7/prometheus-operator/0.log" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.166456 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-86946b57f4-9vs2d_01afdb7b-0479-43db-959f-431508c4f71e/prometheus-operator-admission-webhook/0.log" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.240305 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-86946b57f4-jp5fn_2caad199-2ff2-4de0-bdfd-118c2384c891/prometheus-operator-admission-webhook/0.log" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.396054 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-d8bb48f5d-l5mtz_30f18a72-40dd-49af-a43d-208554ff5d05/operator/0.log" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.492235 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5446b9c989-2dx82_85358935-d7cf-4109-8bea-451aa3150b5c/perses-operator/0.log" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.499301 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.500344 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.546380 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.622025 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:15 crc kubenswrapper[5010]: I1126 18:25:15.964108 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lxz"] Nov 26 18:25:16 crc kubenswrapper[5010]: I1126 18:25:16.347409 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:16 crc kubenswrapper[5010]: I1126 18:25:16.347741 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:16 crc kubenswrapper[5010]: I1126 18:25:16.397760 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:16 crc kubenswrapper[5010]: I1126 18:25:16.634693 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:17 crc kubenswrapper[5010]: I1126 18:25:17.585986 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-v8lxz" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="registry-server" containerID="cri-o://1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29" gracePeriod=2 Nov 26 18:25:18 crc kubenswrapper[5010]: I1126 18:25:18.746759 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-khbw9"] Nov 26 18:25:18 crc kubenswrapper[5010]: I1126 18:25:18.891606 5010 scope.go:117] "RemoveContainer" 
containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:25:18 crc kubenswrapper[5010]: E1126 18:25:18.891953 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.111692 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.211737 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xld9w\" (UniqueName: \"kubernetes.io/projected/84589028-90eb-41c1-8ddc-cb8020d980c1-kube-api-access-xld9w\") pod \"84589028-90eb-41c1-8ddc-cb8020d980c1\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.211891 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-utilities\") pod \"84589028-90eb-41c1-8ddc-cb8020d980c1\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.211920 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-catalog-content\") pod \"84589028-90eb-41c1-8ddc-cb8020d980c1\" (UID: \"84589028-90eb-41c1-8ddc-cb8020d980c1\") " Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.212972 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-utilities" (OuterVolumeSpecName: "utilities") pod "84589028-90eb-41c1-8ddc-cb8020d980c1" (UID: "84589028-90eb-41c1-8ddc-cb8020d980c1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.217914 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84589028-90eb-41c1-8ddc-cb8020d980c1-kube-api-access-xld9w" (OuterVolumeSpecName: "kube-api-access-xld9w") pod "84589028-90eb-41c1-8ddc-cb8020d980c1" (UID: "84589028-90eb-41c1-8ddc-cb8020d980c1"). InnerVolumeSpecName "kube-api-access-xld9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.225493 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "84589028-90eb-41c1-8ddc-cb8020d980c1" (UID: "84589028-90eb-41c1-8ddc-cb8020d980c1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.314800 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xld9w\" (UniqueName: \"kubernetes.io/projected/84589028-90eb-41c1-8ddc-cb8020d980c1-kube-api-access-xld9w\") on node \"crc\" DevicePath \"\"" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.314875 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.314894 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/84589028-90eb-41c1-8ddc-cb8020d980c1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.610813 5010 generic.go:334] "Generic (PLEG): container finished" podID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerID="1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29" exitCode=0 Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.610883 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lxz" event={"ID":"84589028-90eb-41c1-8ddc-cb8020d980c1","Type":"ContainerDied","Data":"1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29"} Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.611162 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v8lxz" event={"ID":"84589028-90eb-41c1-8ddc-cb8020d980c1","Type":"ContainerDied","Data":"fb172d99165496c44b6ef55c1fcda7f4ede995b75531c184a06eff04e7f807e4"} Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.611209 5010 scope.go:117] "RemoveContainer" containerID="1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.610910 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v8lxz" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.611426 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-khbw9" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="registry-server" containerID="cri-o://24bf7551f3097da9f4478c2e03962813d99573604808d46c924515e71ede0776" gracePeriod=2 Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.658864 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lxz"] Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.660401 5010 scope.go:117] "RemoveContainer" containerID="99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.677120 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-v8lxz"] Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.689018 5010 scope.go:117] "RemoveContainer" containerID="b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.817319 5010 scope.go:117] "RemoveContainer" containerID="1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29" Nov 26 18:25:19 crc kubenswrapper[5010]: E1126 18:25:19.818682 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29\": container with ID starting with 1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29 not found: ID does not exist" containerID="1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.818728 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29"} err="failed to get container status \"1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29\": rpc error: code = NotFound desc = could not find container \"1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29\": container with ID starting with 1fc71b4b0c33c296315ffd2df0f2edd53ad45126fe152b6a3351bf6f663f0c29 not found: ID does not exist" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.818749 5010 scope.go:117] "RemoveContainer" containerID="99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066" Nov 26 18:25:19 crc kubenswrapper[5010]: E1126 18:25:19.819054 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066\": container with ID starting with 99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066 not found: ID does not exist" containerID="99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.819077 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066"} err="failed to get container status \"99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066\": rpc error: code = NotFound desc = could not find container \"99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066\": container with ID starting with 
99e3a6ab827ff32786b265c4ad16ee6abc3436b053c21cf6ae4f429ac4da2066 not found: ID does not exist" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.819093 5010 scope.go:117] "RemoveContainer" containerID="b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624" Nov 26 18:25:19 crc kubenswrapper[5010]: E1126 18:25:19.819306 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624\": container with ID starting with b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624 not found: ID does not exist" containerID="b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.819326 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624"} err="failed to get container status \"b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624\": rpc error: code = NotFound desc = could not find container \"b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624\": container with ID starting with b2351ba090b50b4c9d7c37160bf59b9fad43d7ae0bcc564bb3e6ea41a4c26624 not found: ID does not exist" Nov 26 18:25:19 crc kubenswrapper[5010]: I1126 18:25:19.958726 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" path="/var/lib/kubelet/pods/84589028-90eb-41c1-8ddc-cb8020d980c1/volumes" Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.631841 5010 generic.go:334] "Generic (PLEG): container finished" podID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerID="24bf7551f3097da9f4478c2e03962813d99573604808d46c924515e71ede0776" exitCode=0 Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.631907 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerDied","Data":"24bf7551f3097da9f4478c2e03962813d99573604808d46c924515e71ede0776"} Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.903471 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.978277 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-98zhm\" (UniqueName: \"kubernetes.io/projected/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-kube-api-access-98zhm\") pod \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.978334 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-utilities\") pod \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.978440 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-catalog-content\") pod \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\" (UID: \"a5bd4a80-5e4a-4db3-be24-81beb87b99c7\") " Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.979212 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-utilities" (OuterVolumeSpecName: "utilities") pod "a5bd4a80-5e4a-4db3-be24-81beb87b99c7" (UID: "a5bd4a80-5e4a-4db3-be24-81beb87b99c7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:25:20 crc kubenswrapper[5010]: I1126 18:25:20.984217 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-kube-api-access-98zhm" (OuterVolumeSpecName: "kube-api-access-98zhm") pod "a5bd4a80-5e4a-4db3-be24-81beb87b99c7" (UID: "a5bd4a80-5e4a-4db3-be24-81beb87b99c7"). InnerVolumeSpecName "kube-api-access-98zhm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.049264 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a5bd4a80-5e4a-4db3-be24-81beb87b99c7" (UID: "a5bd4a80-5e4a-4db3-be24-81beb87b99c7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.080935 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-98zhm\" (UniqueName: \"kubernetes.io/projected/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-kube-api-access-98zhm\") on node \"crc\" DevicePath \"\"" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.080974 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.080987 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a5bd4a80-5e4a-4db3-be24-81beb87b99c7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.643326 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-khbw9" event={"ID":"a5bd4a80-5e4a-4db3-be24-81beb87b99c7","Type":"ContainerDied","Data":"4b327d386520c059411b47d685a1753208632142c04c50b9d46206d14d39c605"} Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.643408 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-khbw9" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.643630 5010 scope.go:117] "RemoveContainer" containerID="24bf7551f3097da9f4478c2e03962813d99573604808d46c924515e71ede0776" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.668695 5010 scope.go:117] "RemoveContainer" containerID="79fd4571a8f15923f4ddab9c3e4eb29207bae219e08726d99b4310877bd57785" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.675379 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-khbw9"] Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.684196 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-khbw9"] Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.725289 5010 scope.go:117] "RemoveContainer" containerID="34bb669b20b64f49c45b1d6151a6dc57a81d0197181e5542b1ad20cbde6e49dd" Nov 26 18:25:21 crc kubenswrapper[5010]: I1126 18:25:21.903458 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" path="/var/lib/kubelet/pods/a5bd4a80-5e4a-4db3-be24-81beb87b99c7/volumes" Nov 26 18:25:29 crc kubenswrapper[5010]: I1126 18:25:29.899277 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:25:29 crc kubenswrapper[5010]: E1126 18:25:29.907527 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:25:42 crc kubenswrapper[5010]: I1126 18:25:42.892338 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:25:42 crc kubenswrapper[5010]: E1126 18:25:42.893061 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:25:44 crc kubenswrapper[5010]: E1126 18:25:44.993936 5010 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.154:53344->38.102.83.154:42721: write tcp 38.102.83.154:53344->38.102.83.154:42721: write: broken pipe Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.958561 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rlx4d"] Nov 26 18:25:46 crc kubenswrapper[5010]: E1126 18:25:46.959628 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="registry-server" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.959645 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="registry-server" Nov 26 18:25:46 crc kubenswrapper[5010]: E1126 18:25:46.959675 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="extract-utilities" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.959684 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="extract-utilities" Nov 26 18:25:46 crc kubenswrapper[5010]: E1126 18:25:46.959701 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="extract-utilities" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.959725 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="extract-utilities" Nov 26 18:25:46 crc kubenswrapper[5010]: E1126 18:25:46.959752 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="registry-server" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.959759 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="registry-server" Nov 26 18:25:46 crc kubenswrapper[5010]: E1126 18:25:46.959783 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="extract-content" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.959790 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="extract-content" Nov 26 18:25:46 crc kubenswrapper[5010]: E1126 18:25:46.959804 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="extract-content" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.959811 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="extract-content" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.960042 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="84589028-90eb-41c1-8ddc-cb8020d980c1" containerName="registry-server" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.960082 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5bd4a80-5e4a-4db3-be24-81beb87b99c7" containerName="registry-server" Nov 26 18:25:46 crc 
kubenswrapper[5010]: I1126 18:25:46.962063 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:46 crc kubenswrapper[5010]: I1126 18:25:46.968485 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rlx4d"] Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.123364 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-utilities\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.123432 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twx4r\" (UniqueName: \"kubernetes.io/projected/925223fe-aff0-4b03-bb63-e11c30466039-kube-api-access-twx4r\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.123686 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-catalog-content\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.228225 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-utilities\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.228349 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twx4r\" (UniqueName: \"kubernetes.io/projected/925223fe-aff0-4b03-bb63-e11c30466039-kube-api-access-twx4r\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.228490 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-catalog-content\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.229521 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-catalog-content\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.229861 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-utilities\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 
26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.270267 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twx4r\" (UniqueName: \"kubernetes.io/projected/925223fe-aff0-4b03-bb63-e11c30466039-kube-api-access-twx4r\") pod \"certified-operators-rlx4d\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.303334 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.834858 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rlx4d"] Nov 26 18:25:47 crc kubenswrapper[5010]: I1126 18:25:47.938808 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerStarted","Data":"5c0e23f42be43b13cea0f8f627df22b65638622921658490b3b3ab520cd09379"} Nov 26 18:25:48 crc kubenswrapper[5010]: I1126 18:25:48.956573 5010 generic.go:334] "Generic (PLEG): container finished" podID="925223fe-aff0-4b03-bb63-e11c30466039" containerID="2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5" exitCode=0 Nov 26 18:25:48 crc kubenswrapper[5010]: I1126 18:25:48.956635 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerDied","Data":"2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5"} Nov 26 18:25:50 crc kubenswrapper[5010]: I1126 18:25:50.988328 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerStarted","Data":"cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25"} Nov 26 18:25:52 crc kubenswrapper[5010]: I1126 18:25:52.013385 5010 generic.go:334] "Generic (PLEG): container finished" podID="925223fe-aff0-4b03-bb63-e11c30466039" containerID="cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25" exitCode=0 Nov 26 18:25:52 crc kubenswrapper[5010]: I1126 18:25:52.013543 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerDied","Data":"cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25"} Nov 26 18:25:54 crc kubenswrapper[5010]: I1126 18:25:54.045062 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerStarted","Data":"516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4"} Nov 26 18:25:54 crc kubenswrapper[5010]: I1126 18:25:54.074084 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rlx4d" podStartSLOduration=4.406385495 podStartE2EDuration="8.074059992s" podCreationTimestamp="2025-11-26 18:25:46 +0000 UTC" firstStartedPulling="2025-11-26 18:25:48.960614899 +0000 UTC m=+10769.751332047" lastFinishedPulling="2025-11-26 18:25:52.628289396 +0000 UTC m=+10773.419006544" observedRunningTime="2025-11-26 18:25:54.073222431 +0000 UTC m=+10774.863939579" watchObservedRunningTime="2025-11-26 18:25:54.074059992 +0000 UTC m=+10774.864777180" Nov 26 18:25:55 crc 
kubenswrapper[5010]: I1126 18:25:55.892981 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:25:55 crc kubenswrapper[5010]: E1126 18:25:55.893576 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:25:57 crc kubenswrapper[5010]: I1126 18:25:57.303632 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:57 crc kubenswrapper[5010]: I1126 18:25:57.304820 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:57 crc kubenswrapper[5010]: I1126 18:25:57.401940 5010 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:58 crc kubenswrapper[5010]: I1126 18:25:58.177589 5010 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:25:58 crc kubenswrapper[5010]: I1126 18:25:58.247510 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rlx4d"] Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.114957 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rlx4d" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="registry-server" containerID="cri-o://516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4" gracePeriod=2 Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.817624 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.982471 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twx4r\" (UniqueName: \"kubernetes.io/projected/925223fe-aff0-4b03-bb63-e11c30466039-kube-api-access-twx4r\") pod \"925223fe-aff0-4b03-bb63-e11c30466039\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.982748 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-catalog-content\") pod \"925223fe-aff0-4b03-bb63-e11c30466039\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.982839 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-utilities\") pod \"925223fe-aff0-4b03-bb63-e11c30466039\" (UID: \"925223fe-aff0-4b03-bb63-e11c30466039\") " Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.990583 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925223fe-aff0-4b03-bb63-e11c30466039-kube-api-access-twx4r" (OuterVolumeSpecName: "kube-api-access-twx4r") pod "925223fe-aff0-4b03-bb63-e11c30466039" (UID: "925223fe-aff0-4b03-bb63-e11c30466039"). InnerVolumeSpecName "kube-api-access-twx4r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:26:00 crc kubenswrapper[5010]: I1126 18:26:00.990959 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-utilities" (OuterVolumeSpecName: "utilities") pod "925223fe-aff0-4b03-bb63-e11c30466039" (UID: "925223fe-aff0-4b03-bb63-e11c30466039"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.037000 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "925223fe-aff0-4b03-bb63-e11c30466039" (UID: "925223fe-aff0-4b03-bb63-e11c30466039"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.085616 5010 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.085644 5010 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/925223fe-aff0-4b03-bb63-e11c30466039-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.085657 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twx4r\" (UniqueName: \"kubernetes.io/projected/925223fe-aff0-4b03-bb63-e11c30466039-kube-api-access-twx4r\") on node \"crc\" DevicePath \"\"" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.128462 5010 generic.go:334] "Generic (PLEG): container finished" podID="925223fe-aff0-4b03-bb63-e11c30466039" containerID="516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4" exitCode=0 Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.128518 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rlx4d" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.128553 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerDied","Data":"516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4"} Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.129817 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rlx4d" event={"ID":"925223fe-aff0-4b03-bb63-e11c30466039","Type":"ContainerDied","Data":"5c0e23f42be43b13cea0f8f627df22b65638622921658490b3b3ab520cd09379"} Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.129836 5010 scope.go:117] "RemoveContainer" containerID="516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.168021 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rlx4d"] Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.182756 5010 scope.go:117] "RemoveContainer" containerID="cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.193206 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rlx4d"] Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.220787 5010 scope.go:117] "RemoveContainer" containerID="2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.270952 5010 scope.go:117] "RemoveContainer" containerID="516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4" Nov 26 18:26:01 crc kubenswrapper[5010]: E1126 18:26:01.271701 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4\": container with ID starting with 516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4 not found: ID does not exist" containerID="516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.271807 
5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4"} err="failed to get container status \"516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4\": rpc error: code = NotFound desc = could not find container \"516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4\": container with ID starting with 516c11f21b6e961694f5b16e81c82aadb5cee5aeac795369bf1e181b77c7efa4 not found: ID does not exist" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.271882 5010 scope.go:117] "RemoveContainer" containerID="cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25" Nov 26 18:26:01 crc kubenswrapper[5010]: E1126 18:26:01.272267 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25\": container with ID starting with cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25 not found: ID does not exist" containerID="cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.272361 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25"} err="failed to get container status \"cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25\": rpc error: code = NotFound desc = could not find container \"cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25\": container with ID starting with cba3e3cf1b04f285e7b0ca92c149f0cfe7d209f5cb1515def83cfd5d8c9ccf25 not found: ID does not exist" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.272426 5010 scope.go:117] "RemoveContainer" containerID="2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5" Nov 26 18:26:01 crc kubenswrapper[5010]: E1126 18:26:01.272685 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5\": container with ID starting with 2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5 not found: ID does not exist" containerID="2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.272872 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5"} err="failed to get container status \"2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5\": rpc error: code = NotFound desc = could not find container \"2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5\": container with ID starting with 2e9b3a2f94ca3ddeb9ee6c12813a7a418470260fb1676e68d966b68c41c0c2d5 not found: ID does not exist" Nov 26 18:26:01 crc kubenswrapper[5010]: I1126 18:26:01.909553 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925223fe-aff0-4b03-bb63-e11c30466039" path="/var/lib/kubelet/pods/925223fe-aff0-4b03-bb63-e11c30466039/volumes" Nov 26 18:26:07 crc kubenswrapper[5010]: I1126 18:26:07.893740 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:26:07 crc kubenswrapper[5010]: E1126 18:26:07.895060 5010 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:26:21 crc kubenswrapper[5010]: I1126 18:26:21.892944 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:26:21 crc kubenswrapper[5010]: E1126 18:26:21.893999 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:26:35 crc kubenswrapper[5010]: I1126 18:26:35.892107 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:26:35 crc kubenswrapper[5010]: E1126 18:26:35.892930 5010 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-kt7rg_openshift-machine-config-operator(a6b0e322-9296-4356-9e3b-6497381eb30d)\"" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" Nov 26 18:26:45 crc kubenswrapper[5010]: I1126 18:26:45.702508 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/openstack-cell1-galera-0" podUID="536595b1-5ba9-4588-8e64-32480adb79ea" containerName="galera" probeResult="failure" output="command timed out" Nov 26 18:26:45 crc kubenswrapper[5010]: I1126 18:26:45.705444 5010 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-cell1-galera-0" podUID="536595b1-5ba9-4588-8e64-32480adb79ea" containerName="galera" probeResult="failure" output="command timed out" Nov 26 18:26:47 crc kubenswrapper[5010]: I1126 18:26:47.895623 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:26:48 crc kubenswrapper[5010]: I1126 18:26:48.791501 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"47528826b71674a4a24deb23d2670e1bf1f138327e53654381c7284855a54d1d"} Nov 26 18:27:18 crc kubenswrapper[5010]: I1126 18:27:18.210159 5010 generic.go:334] "Generic (PLEG): container finished" podID="70abf141-daf8-4b48-90c5-534d8de204ed" containerID="e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1" exitCode=0 Nov 26 18:27:18 crc kubenswrapper[5010]: I1126 18:27:18.210249 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-djwnc/must-gather-rxfw4" event={"ID":"70abf141-daf8-4b48-90c5-534d8de204ed","Type":"ContainerDied","Data":"e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1"} Nov 26 18:27:18 crc kubenswrapper[5010]: I1126 18:27:18.212204 5010 scope.go:117] "RemoveContainer" containerID="e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1" Nov 26 
18:27:19 crc kubenswrapper[5010]: I1126 18:27:19.277063 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-djwnc_must-gather-rxfw4_70abf141-daf8-4b48-90c5-534d8de204ed/gather/0.log" Nov 26 18:27:28 crc kubenswrapper[5010]: I1126 18:27:28.749015 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-djwnc/must-gather-rxfw4"] Nov 26 18:27:28 crc kubenswrapper[5010]: I1126 18:27:28.750001 5010 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-djwnc/must-gather-rxfw4" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="copy" containerID="cri-o://92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59" gracePeriod=2 Nov 26 18:27:28 crc kubenswrapper[5010]: I1126 18:27:28.760400 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-djwnc/must-gather-rxfw4"] Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.266875 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-djwnc_must-gather-rxfw4_70abf141-daf8-4b48-90c5-534d8de204ed/copy/0.log" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.267614 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.346644 5010 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-djwnc_must-gather-rxfw4_70abf141-daf8-4b48-90c5-534d8de204ed/copy/0.log" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.347230 5010 generic.go:334] "Generic (PLEG): container finished" podID="70abf141-daf8-4b48-90c5-534d8de204ed" containerID="92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59" exitCode=143 Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.347312 5010 scope.go:117] "RemoveContainer" containerID="92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.347349 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-djwnc/must-gather-rxfw4" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.383275 5010 scope.go:117] "RemoveContainer" containerID="e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.443111 5010 scope.go:117] "RemoveContainer" containerID="92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59" Nov 26 18:27:29 crc kubenswrapper[5010]: E1126 18:27:29.443665 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59\": container with ID starting with 92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59 not found: ID does not exist" containerID="92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.443769 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59"} err="failed to get container status \"92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59\": rpc error: code = NotFound desc = could not find container \"92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59\": container with ID starting with 92ef2aa69777f2727b9c087186248dc4296896e6d3202ea7a47d649a21593c59 not found: ID does not exist" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.443800 5010 scope.go:117] "RemoveContainer" containerID="e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1" Nov 26 18:27:29 crc kubenswrapper[5010]: E1126 18:27:29.444211 5010 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1\": container with ID starting with e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1 not found: ID does not exist" containerID="e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.444270 5010 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1"} err="failed to get container status \"e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1\": rpc error: code = NotFound desc = could not find container \"e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1\": container with ID starting with e7ab56bfddfa63647a0c34191a25c4439c88d09c7f321a8a88e81db38281f1e1 not found: ID does not exist" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.457826 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/70abf141-daf8-4b48-90c5-534d8de204ed-must-gather-output\") pod \"70abf141-daf8-4b48-90c5-534d8de204ed\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.457886 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xr9cg\" (UniqueName: \"kubernetes.io/projected/70abf141-daf8-4b48-90c5-534d8de204ed-kube-api-access-xr9cg\") pod \"70abf141-daf8-4b48-90c5-534d8de204ed\" (UID: \"70abf141-daf8-4b48-90c5-534d8de204ed\") " Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.470569 5010 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70abf141-daf8-4b48-90c5-534d8de204ed-kube-api-access-xr9cg" (OuterVolumeSpecName: "kube-api-access-xr9cg") pod "70abf141-daf8-4b48-90c5-534d8de204ed" (UID: "70abf141-daf8-4b48-90c5-534d8de204ed"). InnerVolumeSpecName "kube-api-access-xr9cg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.559911 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xr9cg\" (UniqueName: \"kubernetes.io/projected/70abf141-daf8-4b48-90c5-534d8de204ed-kube-api-access-xr9cg\") on node \"crc\" DevicePath \"\"" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.664672 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70abf141-daf8-4b48-90c5-534d8de204ed-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "70abf141-daf8-4b48-90c5-534d8de204ed" (UID: "70abf141-daf8-4b48-90c5-534d8de204ed"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.765010 5010 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/70abf141-daf8-4b48-90c5-534d8de204ed-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 26 18:27:29 crc kubenswrapper[5010]: I1126 18:27:29.917913 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" path="/var/lib/kubelet/pods/70abf141-daf8-4b48-90c5-534d8de204ed/volumes" Nov 26 18:29:11 crc kubenswrapper[5010]: I1126 18:29:11.423147 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:29:11 crc kubenswrapper[5010]: I1126 18:29:11.423887 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:29:41 crc kubenswrapper[5010]: I1126 18:29:41.423205 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:29:41 crc kubenswrapper[5010]: I1126 18:29:41.423863 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.165224 5010 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld"] Nov 26 18:30:00 crc kubenswrapper[5010]: E1126 18:30:00.166640 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="copy" Nov 26 18:30:00 
crc kubenswrapper[5010]: I1126 18:30:00.166664 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="copy" Nov 26 18:30:00 crc kubenswrapper[5010]: E1126 18:30:00.166686 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="extract-content" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.166699 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="extract-content" Nov 26 18:30:00 crc kubenswrapper[5010]: E1126 18:30:00.166738 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="gather" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.166751 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="gather" Nov 26 18:30:00 crc kubenswrapper[5010]: E1126 18:30:00.166804 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="registry-server" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.166819 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="registry-server" Nov 26 18:30:00 crc kubenswrapper[5010]: E1126 18:30:00.166865 5010 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="extract-utilities" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.166877 5010 state_mem.go:107] "Deleted CPUSet assignment" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="extract-utilities" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.167254 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="gather" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.167290 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="925223fe-aff0-4b03-bb63-e11c30466039" containerName="registry-server" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.167324 5010 memory_manager.go:354] "RemoveStaleState removing state" podUID="70abf141-daf8-4b48-90c5-534d8de204ed" containerName="copy" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.168547 5010 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.173463 5010 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.173535 5010 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.178890 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld"] Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.225593 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d79bafc-c526-4b99-9d26-d4386b619c55-config-volume\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.226092 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j7qqz\" (UniqueName: \"kubernetes.io/projected/5d79bafc-c526-4b99-9d26-d4386b619c55-kube-api-access-j7qqz\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.226359 5010 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d79bafc-c526-4b99-9d26-d4386b619c55-secret-volume\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.328927 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d79bafc-c526-4b99-9d26-d4386b619c55-config-volume\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.329089 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j7qqz\" (UniqueName: \"kubernetes.io/projected/5d79bafc-c526-4b99-9d26-d4386b619c55-kube-api-access-j7qqz\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.329205 5010 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d79bafc-c526-4b99-9d26-d4386b619c55-secret-volume\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.329892 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d79bafc-c526-4b99-9d26-d4386b619c55-config-volume\") pod 
\"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.336525 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d79bafc-c526-4b99-9d26-d4386b619c55-secret-volume\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.351156 5010 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j7qqz\" (UniqueName: \"kubernetes.io/projected/5d79bafc-c526-4b99-9d26-d4386b619c55-kube-api-access-j7qqz\") pod \"collect-profiles-29403030-zfxld\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:00 crc kubenswrapper[5010]: I1126 18:30:00.532676 5010 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:01 crc kubenswrapper[5010]: I1126 18:30:01.130858 5010 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld"] Nov 26 18:30:01 crc kubenswrapper[5010]: I1126 18:30:01.323856 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" event={"ID":"5d79bafc-c526-4b99-9d26-d4386b619c55","Type":"ContainerStarted","Data":"3091b60a04a88f6281780e8901a9032c5bc3fe24542f2ad26dc83764f98b2c9b"} Nov 26 18:30:01 crc kubenswrapper[5010]: I1126 18:30:01.324198 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" event={"ID":"5d79bafc-c526-4b99-9d26-d4386b619c55","Type":"ContainerStarted","Data":"ecaca12ae94d4197637d89da9fc122f332bdd371f5b62304ec217c4a953f91da"} Nov 26 18:30:01 crc kubenswrapper[5010]: I1126 18:30:01.346305 5010 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" podStartSLOduration=1.346283917 podStartE2EDuration="1.346283917s" podCreationTimestamp="2025-11-26 18:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 18:30:01.339863268 +0000 UTC m=+11022.130580416" watchObservedRunningTime="2025-11-26 18:30:01.346283917 +0000 UTC m=+11022.137001065" Nov 26 18:30:02 crc kubenswrapper[5010]: I1126 18:30:02.347087 5010 generic.go:334] "Generic (PLEG): container finished" podID="5d79bafc-c526-4b99-9d26-d4386b619c55" containerID="3091b60a04a88f6281780e8901a9032c5bc3fe24542f2ad26dc83764f98b2c9b" exitCode=0 Nov 26 18:30:02 crc kubenswrapper[5010]: I1126 18:30:02.347283 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" event={"ID":"5d79bafc-c526-4b99-9d26-d4386b619c55","Type":"ContainerDied","Data":"3091b60a04a88f6281780e8901a9032c5bc3fe24542f2ad26dc83764f98b2c9b"} Nov 26 18:30:03 crc kubenswrapper[5010]: I1126 18:30:03.830650 5010 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.015086 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j7qqz\" (UniqueName: \"kubernetes.io/projected/5d79bafc-c526-4b99-9d26-d4386b619c55-kube-api-access-j7qqz\") pod \"5d79bafc-c526-4b99-9d26-d4386b619c55\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.015122 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d79bafc-c526-4b99-9d26-d4386b619c55-config-volume\") pod \"5d79bafc-c526-4b99-9d26-d4386b619c55\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.015373 5010 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d79bafc-c526-4b99-9d26-d4386b619c55-secret-volume\") pod \"5d79bafc-c526-4b99-9d26-d4386b619c55\" (UID: \"5d79bafc-c526-4b99-9d26-d4386b619c55\") " Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.015696 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5d79bafc-c526-4b99-9d26-d4386b619c55-config-volume" (OuterVolumeSpecName: "config-volume") pod "5d79bafc-c526-4b99-9d26-d4386b619c55" (UID: "5d79bafc-c526-4b99-9d26-d4386b619c55"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.017782 5010 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5d79bafc-c526-4b99-9d26-d4386b619c55-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.021680 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d79bafc-c526-4b99-9d26-d4386b619c55-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5d79bafc-c526-4b99-9d26-d4386b619c55" (UID: "5d79bafc-c526-4b99-9d26-d4386b619c55"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.021784 5010 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d79bafc-c526-4b99-9d26-d4386b619c55-kube-api-access-j7qqz" (OuterVolumeSpecName: "kube-api-access-j7qqz") pod "5d79bafc-c526-4b99-9d26-d4386b619c55" (UID: "5d79bafc-c526-4b99-9d26-d4386b619c55"). InnerVolumeSpecName "kube-api-access-j7qqz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.121625 5010 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5d79bafc-c526-4b99-9d26-d4386b619c55-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.121702 5010 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j7qqz\" (UniqueName: \"kubernetes.io/projected/5d79bafc-c526-4b99-9d26-d4386b619c55-kube-api-access-j7qqz\") on node \"crc\" DevicePath \"\"" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.408162 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" event={"ID":"5d79bafc-c526-4b99-9d26-d4386b619c55","Type":"ContainerDied","Data":"ecaca12ae94d4197637d89da9fc122f332bdd371f5b62304ec217c4a953f91da"} Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.408212 5010 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ecaca12ae94d4197637d89da9fc122f332bdd371f5b62304ec217c4a953f91da" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.408273 5010 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29403030-zfxld" Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.444030 5010 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62"] Nov 26 18:30:04 crc kubenswrapper[5010]: I1126 18:30:04.455695 5010 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29402985-z5d62"] Nov 26 18:30:05 crc kubenswrapper[5010]: I1126 18:30:05.914340 5010 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="30949a26-3ac2-4c47-ab95-0b1d198561c7" path="/var/lib/kubelet/pods/30949a26-3ac2-4c47-ab95-0b1d198561c7/volumes" Nov 26 18:30:11 crc kubenswrapper[5010]: I1126 18:30:11.422901 5010 patch_prober.go:28] interesting pod/machine-config-daemon-kt7rg container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 18:30:11 crc kubenswrapper[5010]: I1126 18:30:11.424996 5010 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 18:30:11 crc kubenswrapper[5010]: I1126 18:30:11.425071 5010 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" Nov 26 18:30:11 crc kubenswrapper[5010]: I1126 18:30:11.426500 5010 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"47528826b71674a4a24deb23d2670e1bf1f138327e53654381c7284855a54d1d"} pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 18:30:11 crc kubenswrapper[5010]: I1126 18:30:11.426674 5010 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" podUID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerName="machine-config-daemon" containerID="cri-o://47528826b71674a4a24deb23d2670e1bf1f138327e53654381c7284855a54d1d" gracePeriod=600 Nov 26 18:30:12 crc kubenswrapper[5010]: I1126 18:30:12.541581 5010 generic.go:334] "Generic (PLEG): container finished" podID="a6b0e322-9296-4356-9e3b-6497381eb30d" containerID="47528826b71674a4a24deb23d2670e1bf1f138327e53654381c7284855a54d1d" exitCode=0 Nov 26 18:30:12 crc kubenswrapper[5010]: I1126 18:30:12.541818 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerDied","Data":"47528826b71674a4a24deb23d2670e1bf1f138327e53654381c7284855a54d1d"} Nov 26 18:30:12 crc kubenswrapper[5010]: I1126 18:30:12.542087 5010 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt7rg" event={"ID":"a6b0e322-9296-4356-9e3b-6497381eb30d","Type":"ContainerStarted","Data":"13778c9b771870a27d666e4bad461249408f8313828cf7308331630b7a3eb636"} Nov 26 18:30:12 crc kubenswrapper[5010]: I1126 18:30:12.542123 5010 scope.go:117] "RemoveContainer" containerID="8b0663be4489f82ac9859878b1195e3025be01dd611f25ea037b2c804292dfb5" Nov 26 18:30:14 crc kubenswrapper[5010]: I1126 18:30:14.993743 5010 scope.go:117] "RemoveContainer" containerID="6274e4c0c4b07b891584ce4c1b0d9666f08d6503cba62cc395ad7d024f160a74" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111643552024450 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111643553017366 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111615453016507 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111615453015457 5ustar corecore